Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.8/0108-3.8.9-all-fixes.patch

Revision 2160
Tue Apr 30 12:25:39 2013 UTC by niro
File size: 57030 byte(s)
-linux-3.8.9
1 niro 2160 diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
2     index f9e8657..23fa6a2 100644
3     --- a/arch/arm/kernel/perf_event.c
4     +++ b/arch/arm/kernel/perf_event.c
5     @@ -261,7 +261,10 @@ validate_event(struct pmu_hw_events *hw_events,
6     struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
7     struct pmu *leader_pmu = event->group_leader->pmu;
8    
9     - if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
10     + if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
11     + return 1;
12     +
13     + if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
14     return 1;
15    
16     return armpmu->get_event_idx(hw_events, event) >= 0;
17     diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
18     index 0edce4b..5e3ca7a 100644
19     --- a/arch/arm/mach-imx/clk-imx35.c
20     +++ b/arch/arm/mach-imx/clk-imx35.c
21     @@ -265,6 +265,8 @@ int __init mx35_clocks_init()
22     clk_prepare_enable(clk[gpio3_gate]);
23     clk_prepare_enable(clk[iim_gate]);
24     clk_prepare_enable(clk[emi_gate]);
25     + clk_prepare_enable(clk[max_gate]);
26     + clk_prepare_enable(clk[iomuxc_gate]);
27    
28     /*
29     * SCC is needed to boot via mmc after a watchdog reset. The clock code
30     diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
31     index dd3d591..48bc3c0 100644
32     --- a/arch/arm/mm/cache-feroceon-l2.c
33     +++ b/arch/arm/mm/cache-feroceon-l2.c
34     @@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
35     outer_cache.inv_range = feroceon_l2_inv_range;
36     outer_cache.clean_range = feroceon_l2_clean_range;
37     outer_cache.flush_range = feroceon_l2_flush_range;
38     + outer_cache.inv_all = l2_inv_all;
39    
40     enable_l2();
41    
42     diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
43     index 2c3b942..2556cf1 100644
44     --- a/arch/arm/mm/proc-arm920.S
45     +++ b/arch/arm/mm/proc-arm920.S
46     @@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
47     /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
48     .globl cpu_arm920_suspend_size
49     .equ cpu_arm920_suspend_size, 4 * 3
50     -#ifdef CONFIG_PM_SLEEP
51     +#ifdef CONFIG_ARM_CPU_SUSPEND
52     ENTRY(cpu_arm920_do_suspend)
53     stmfd sp!, {r4 - r6, lr}
54     mrc p15, 0, r4, c13, c0, 0 @ PID
55     diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
56     index f1803f7e..344c8a5 100644
57     --- a/arch/arm/mm/proc-arm926.S
58     +++ b/arch/arm/mm/proc-arm926.S
59     @@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
60     /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
61     .globl cpu_arm926_suspend_size
62     .equ cpu_arm926_suspend_size, 4 * 3
63     -#ifdef CONFIG_PM_SLEEP
64     +#ifdef CONFIG_ARM_CPU_SUSPEND
65     ENTRY(cpu_arm926_do_suspend)
66     stmfd sp!, {r4 - r6, lr}
67     mrc p15, 0, r4, c13, c0, 0 @ PID
68     diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
69     index 82f9cdc..0b60dd3 100644
70     --- a/arch/arm/mm/proc-mohawk.S
71     +++ b/arch/arm/mm/proc-mohawk.S
72     @@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
73    
74     .globl cpu_mohawk_suspend_size
75     .equ cpu_mohawk_suspend_size, 4 * 6
76     -#ifdef CONFIG_PM_SLEEP
77     +#ifdef CONFIG_ARM_CPU_SUSPEND
78     ENTRY(cpu_mohawk_do_suspend)
79     stmfd sp!, {r4 - r9, lr}
80     mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
81     diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
82     index 3aa0da1..d92dfd0 100644
83     --- a/arch/arm/mm/proc-sa1100.S
84     +++ b/arch/arm/mm/proc-sa1100.S
85     @@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
86    
87     .globl cpu_sa1100_suspend_size
88     .equ cpu_sa1100_suspend_size, 4 * 3
89     -#ifdef CONFIG_PM_SLEEP
90     +#ifdef CONFIG_ARM_CPU_SUSPEND
91     ENTRY(cpu_sa1100_do_suspend)
92     stmfd sp!, {r4 - r6, lr}
93     mrc p15, 0, r4, c3, c0, 0 @ domain ID
94     diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
95     index 09c5233..d222215 100644
96     --- a/arch/arm/mm/proc-v6.S
97     +++ b/arch/arm/mm/proc-v6.S
98     @@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext)
99     /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
100     .globl cpu_v6_suspend_size
101     .equ cpu_v6_suspend_size, 4 * 6
102     -#ifdef CONFIG_PM_SLEEP
103     +#ifdef CONFIG_ARM_CPU_SUSPEND
104     ENTRY(cpu_v6_do_suspend)
105     stmfd sp!, {r4 - r9, lr}
106     mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
107     diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
108     index eb93d64..e8efd83 100644
109     --- a/arch/arm/mm/proc-xsc3.S
110     +++ b/arch/arm/mm/proc-xsc3.S
111     @@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
112    
113     .globl cpu_xsc3_suspend_size
114     .equ cpu_xsc3_suspend_size, 4 * 6
115     -#ifdef CONFIG_PM_SLEEP
116     +#ifdef CONFIG_ARM_CPU_SUSPEND
117     ENTRY(cpu_xsc3_do_suspend)
118     stmfd sp!, {r4 - r9, lr}
119     mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
120     diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
121     index 2551036..e766f88 100644
122     --- a/arch/arm/mm/proc-xscale.S
123     +++ b/arch/arm/mm/proc-xscale.S
124     @@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
125    
126     .globl cpu_xscale_suspend_size
127     .equ cpu_xscale_suspend_size, 4 * 6
128     -#ifdef CONFIG_PM_SLEEP
129     +#ifdef CONFIG_ARM_CPU_SUSPEND
130     ENTRY(cpu_xscale_do_suspend)
131     stmfd sp!, {r4 - r9, lr}
132     mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
133     diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
134     index dbaec94..21bff32 100644
135     --- a/arch/mips/include/asm/page.h
136     +++ b/arch/mips/include/asm/page.h
137     @@ -31,7 +31,7 @@
138     #define PAGE_SHIFT 16
139     #endif
140     #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
141     -#define PAGE_MASK (~(PAGE_SIZE - 1))
142     +#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
143    
144     #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
145     #define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
146     diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
147     index 3d990d3..e0822a3 100644
148     --- a/arch/powerpc/kernel/entry_64.S
149     +++ b/arch/powerpc/kernel/entry_64.S
150     @@ -634,7 +634,7 @@ resume_kernel:
151     /* Clear _TIF_EMULATE_STACK_STORE flag */
152     lis r11,_TIF_EMULATE_STACK_STORE@h
153     addi r5,r9,TI_FLAGS
154     - ldarx r4,0,r5
155     +0: ldarx r4,0,r5
156     andc r4,r4,r11
157     stdcx. r4,0,r5
158     bne- 0b
159     diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
160     index 1f89d26..2f4baa0 100644
161     --- a/arch/powerpc/kvm/e500mc.c
162     +++ b/arch/powerpc/kvm/e500mc.c
163     @@ -108,6 +108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
164     {
165     }
166    
167     +static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
168     +
169     void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
170     {
171     struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
172     @@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
173     mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
174     mtspr(SPRN_GESR, vcpu->arch.shared->esr);
175    
176     - if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
177     + if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
178     + __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
179     kvmppc_e500_tlbil_all(vcpu_e500);
180     + __get_cpu_var(last_vcpu_on_cpu) = vcpu;
181     + }
182    
183     kvmppc_load_guest_fp(vcpu);
184     }
185     diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
186     index 27cb321..379d96e 100644
187     --- a/arch/s390/include/asm/io.h
188     +++ b/arch/s390/include/asm/io.h
189     @@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
190     #define ioremap_nocache(addr, size) ioremap(addr, size)
191     #define ioremap_wc ioremap_nocache
192    
193     -/* TODO: s390 cannot support io_remap_pfn_range... */
194     -#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
195     - remap_pfn_range(vma, vaddr, pfn, size, prot)
196     -
197     static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
198     {
199     return (void __iomem *) offset;
200     diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
201     index 098adbb..1532d7f 100644
202     --- a/arch/s390/include/asm/pgtable.h
203     +++ b/arch/s390/include/asm/pgtable.h
204     @@ -56,6 +56,10 @@ extern unsigned long zero_page_mask;
205     (((unsigned long)(vaddr)) & zero_page_mask))))
206     #define __HAVE_COLOR_ZERO_PAGE
207    
208     +/* TODO: s390 cannot support io_remap_pfn_range... */
209     +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
210     + remap_pfn_range(vma, vaddr, pfn, size, prot)
211     +
212     #endif /* !__ASSEMBLY__ */
213    
214     /*
215     diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
216     index dc87b65..85039f9 100644
217     --- a/arch/x86/include/asm/kvm_host.h
218     +++ b/arch/x86/include/asm/kvm_host.h
219     @@ -419,8 +419,8 @@ struct kvm_vcpu_arch {
220     gpa_t time;
221     struct pvclock_vcpu_time_info hv_clock;
222     unsigned int hw_tsc_khz;
223     - unsigned int time_offset;
224     - struct page *time_page;
225     + struct gfn_to_hva_cache pv_time;
226     + bool pv_time_enabled;
227     /* set guest stopped flag in pvclock flags field */
228     bool pvclock_set_guest_stopped_request;
229    
230     diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
231     index 4914e94..70602f8 100644
232     --- a/arch/x86/kernel/cpu/perf_event_intel.c
233     +++ b/arch/x86/kernel/cpu/perf_event_intel.c
234     @@ -128,8 +128,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
235     };
236    
237     static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
238     - INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
239     - INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
240     + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
241     + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
242     + EVENT_EXTRA_END
243     +};
244     +
245     +static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
246     + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
247     + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
248     EVENT_EXTRA_END
249     };
250    
251     @@ -2072,7 +2078,10 @@ __init int intel_pmu_init(void)
252     x86_pmu.event_constraints = intel_snb_event_constraints;
253     x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
254     x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
255     - x86_pmu.extra_regs = intel_snb_extra_regs;
256     + if (boot_cpu_data.x86_model == 45)
257     + x86_pmu.extra_regs = intel_snbep_extra_regs;
258     + else
259     + x86_pmu.extra_regs = intel_snb_extra_regs;
260     /* all extra regs are per-cpu when HT is on */
261     x86_pmu.er_flags |= ERF_HAS_RSP_1;
262     x86_pmu.er_flags |= ERF_NO_HT_SHARING;
263     @@ -2098,7 +2107,10 @@ __init int intel_pmu_init(void)
264     x86_pmu.event_constraints = intel_snb_event_constraints;
265     x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
266     x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
267     - x86_pmu.extra_regs = intel_snb_extra_regs;
268     + if (boot_cpu_data.x86_model == 62)
269     + x86_pmu.extra_regs = intel_snbep_extra_regs;
270     + else
271     + x86_pmu.extra_regs = intel_snb_extra_regs;
272     /* all extra regs are per-cpu when HT is on */
273     x86_pmu.er_flags |= ERF_HAS_RSP_1;
274     x86_pmu.er_flags |= ERF_NO_HT_SHARING;
275     diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
276     index 9392f52..a2f492c 100644
277     --- a/arch/x86/kvm/lapic.c
278     +++ b/arch/x86/kvm/lapic.c
279     @@ -1781,7 +1781,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
280     if (!pv_eoi_enabled(vcpu))
281     return 0;
282     return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
283     - addr);
284     + addr, sizeof(u8));
285     }
286    
287     void kvm_lapic_init(void)
288     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
289     index c243b81..9a51121 100644
290     --- a/arch/x86/kvm/x86.c
291     +++ b/arch/x86/kvm/x86.c
292     @@ -1408,10 +1408,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
293     unsigned long flags, this_tsc_khz;
294     struct kvm_vcpu_arch *vcpu = &v->arch;
295     struct kvm_arch *ka = &v->kvm->arch;
296     - void *shared_kaddr;
297     s64 kernel_ns, max_kernel_ns;
298     u64 tsc_timestamp, host_tsc;
299     - struct pvclock_vcpu_time_info *guest_hv_clock;
300     + struct pvclock_vcpu_time_info guest_hv_clock;
301     u8 pvclock_flags;
302     bool use_master_clock;
303    
304     @@ -1465,7 +1464,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
305    
306     local_irq_restore(flags);
307    
308     - if (!vcpu->time_page)
309     + if (!vcpu->pv_time_enabled)
310     return 0;
311    
312     /*
313     @@ -1527,12 +1526,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
314     */
315     vcpu->hv_clock.version += 2;
316    
317     - shared_kaddr = kmap_atomic(vcpu->time_page);
318     -
319     - guest_hv_clock = shared_kaddr + vcpu->time_offset;
320     + if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
321     + &guest_hv_clock, sizeof(guest_hv_clock))))
322     + return 0;
323    
324     /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
325     - pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
326     + pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
327    
328     if (vcpu->pvclock_set_guest_stopped_request) {
329     pvclock_flags |= PVCLOCK_GUEST_STOPPED;
330     @@ -1545,12 +1544,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
331    
332     vcpu->hv_clock.flags = pvclock_flags;
333    
334     - memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
335     - sizeof(vcpu->hv_clock));
336     -
337     - kunmap_atomic(shared_kaddr);
338     -
339     - mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
340     + kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
341     + &vcpu->hv_clock,
342     + sizeof(vcpu->hv_clock));
343     return 0;
344     }
345    
346     @@ -1829,7 +1825,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
347     return 0;
348     }
349    
350     - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
351     + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
352     + sizeof(u32)))
353     return 1;
354    
355     vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
356     @@ -1839,10 +1836,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
357    
358     static void kvmclock_reset(struct kvm_vcpu *vcpu)
359     {
360     - if (vcpu->arch.time_page) {
361     - kvm_release_page_dirty(vcpu->arch.time_page);
362     - vcpu->arch.time_page = NULL;
363     - }
364     + vcpu->arch.pv_time_enabled = false;
365     }
366    
367     static void accumulate_steal_time(struct kvm_vcpu *vcpu)
368     @@ -1948,6 +1942,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
369     break;
370     case MSR_KVM_SYSTEM_TIME_NEW:
371     case MSR_KVM_SYSTEM_TIME: {
372     + u64 gpa_offset;
373     kvmclock_reset(vcpu);
374    
375     vcpu->arch.time = data;
376     @@ -1957,14 +1952,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
377     if (!(data & 1))
378     break;
379    
380     - /* ...but clean it before doing the actual write */
381     - vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
382     -
383     - vcpu->arch.time_page =
384     - gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
385     + gpa_offset = data & ~(PAGE_MASK | 1);
386    
387     - if (is_error_page(vcpu->arch.time_page))
388     - vcpu->arch.time_page = NULL;
389     + if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
390     + &vcpu->arch.pv_time, data & ~1ULL,
391     + sizeof(struct pvclock_vcpu_time_info)))
392     + vcpu->arch.pv_time_enabled = false;
393     + else
394     + vcpu->arch.pv_time_enabled = true;
395    
396     break;
397     }
398     @@ -1981,7 +1976,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
399     return 1;
400    
401     if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
402     - data & KVM_STEAL_VALID_BITS))
403     + data & KVM_STEAL_VALID_BITS,
404     + sizeof(struct kvm_steal_time)))
405     return 1;
406    
407     vcpu->arch.st.msr_val = data;
408     @@ -2967,7 +2963,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
409     */
410     static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
411     {
412     - if (!vcpu->arch.time_page)
413     + if (!vcpu->arch.pv_time_enabled)
414     return -EINVAL;
415     vcpu->arch.pvclock_set_guest_stopped_request = true;
416     kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
417     @@ -6661,6 +6657,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
418     goto fail_free_wbinvd_dirty_mask;
419    
420     vcpu->arch.ia32_tsc_adjust_msr = 0x0;
421     + vcpu->arch.pv_time_enabled = false;
422     kvm_async_pf_hash_reset(vcpu);
423     kvm_pmu_init(vcpu);
424    
425     diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
426     index ef5356c..0262210 100644
427     --- a/crypto/algif_hash.c
428     +++ b/crypto/algif_hash.c
429     @@ -161,6 +161,8 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
430     else if (len < ds)
431     msg->msg_flags |= MSG_TRUNC;
432    
433     + msg->msg_namelen = 0;
434     +
435     lock_sock(sk);
436     if (ctx->more) {
437     ctx->more = 0;
438     diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
439     index 6a6dfc0..a1c4f0a 100644
440     --- a/crypto/algif_skcipher.c
441     +++ b/crypto/algif_skcipher.c
442     @@ -432,6 +432,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
443     long copied = 0;
444    
445     lock_sock(sk);
446     + msg->msg_namelen = 0;
447     for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
448     iovlen--, iov++) {
449     unsigned long seglen = iov->iov_len;
450     diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
451     index fe6d4be..615d262 100644
452     --- a/drivers/char/hpet.c
453     +++ b/drivers/char/hpet.c
454     @@ -373,26 +373,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
455     struct hpet_dev *devp;
456     unsigned long addr;
457    
458     - if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
459     - return -EINVAL;
460     -
461     devp = file->private_data;
462     addr = devp->hd_hpets->hp_hpet_phys;
463    
464     if (addr & (PAGE_SIZE - 1))
465     return -ENOSYS;
466    
467     - vma->vm_flags |= VM_IO;
468     vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
469     -
470     - if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
471     - PAGE_SIZE, vma->vm_page_prot)) {
472     - printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
473     - __func__);
474     - return -EAGAIN;
475     - }
476     -
477     - return 0;
478     + return vm_iomap_memory(vma, addr, PAGE_SIZE);
479     #else
480     return -ENOSYS;
481     #endif
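
The same open-coded io_remap_pfn_range() logic is removed from mtdchar and fbmem further down. For reference, a minimal sketch of what an mmap handler reduces to once vm_iomap_memory() (added by the mm/memory.c hunk at the end of this patch) performs the offset and length validation (hypothetical driver, illustrative only):

    /* Map one page of device registers at dev->phys_base into userspace.
     * vm_iomap_memory() checks that the vma's offset and size fit inside
     * the region and then calls io_remap_pfn_range() itself, so the manual
     * vm_pgoff/vm_end checks and VM_IO flag handling go away.
     */
    static int foo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct foo_dev *dev = file->private_data;

            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
            return vm_iomap_memory(vma, dev->phys_base, PAGE_SIZE);
    }
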
482     diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
483     index 75b1f89..fd86b37 100644
484     --- a/drivers/md/raid1.c
485     +++ b/drivers/md/raid1.c
486     @@ -1001,6 +1001,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
487     const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
488     const unsigned long do_discard = (bio->bi_rw
489     & (REQ_DISCARD | REQ_SECURE));
490     + const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
491     struct md_rdev *blocked_rdev;
492     struct blk_plug_cb *cb;
493     struct raid1_plug_cb *plug = NULL;
494     @@ -1302,7 +1303,8 @@ read_again:
495     conf->mirrors[i].rdev->data_offset);
496     mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
497     mbio->bi_end_io = raid1_end_write_request;
498     - mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
499     + mbio->bi_rw =
500     + WRITE | do_flush_fua | do_sync | do_discard | do_same;
501     mbio->bi_private = r1_bio;
502    
503     atomic_inc(&r1_bio->remaining);
504     @@ -2819,6 +2821,9 @@ static int run(struct mddev *mddev)
505     if (IS_ERR(conf))
506     return PTR_ERR(conf);
507    
508     + if (mddev->queue)
509     + blk_queue_max_write_same_sectors(mddev->queue,
510     + mddev->chunk_sectors);
511     rdev_for_each(rdev, mddev) {
512     if (!mddev->gendisk)
513     continue;
514     diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
515     index 8d925dc..b3898d4 100644
516     --- a/drivers/md/raid10.c
517     +++ b/drivers/md/raid10.c
518     @@ -1106,6 +1106,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
519     const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
520     const unsigned long do_discard = (bio->bi_rw
521     & (REQ_DISCARD | REQ_SECURE));
522     + const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
523     unsigned long flags;
524     struct md_rdev *blocked_rdev;
525     struct blk_plug_cb *cb;
526     @@ -1461,7 +1462,8 @@ retry_write:
527     rdev));
528     mbio->bi_bdev = rdev->bdev;
529     mbio->bi_end_io = raid10_end_write_request;
530     - mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
531     + mbio->bi_rw =
532     + WRITE | do_sync | do_fua | do_discard | do_same;
533     mbio->bi_private = r10_bio;
534    
535     atomic_inc(&r10_bio->remaining);
536     @@ -1503,7 +1505,8 @@ retry_write:
537     r10_bio, rdev));
538     mbio->bi_bdev = rdev->bdev;
539     mbio->bi_end_io = raid10_end_write_request;
540     - mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
541     + mbio->bi_rw =
542     + WRITE | do_sync | do_fua | do_discard | do_same;
543     mbio->bi_private = r10_bio;
544    
545     atomic_inc(&r10_bio->remaining);
546     @@ -3570,6 +3573,8 @@ static int run(struct mddev *mddev)
547     if (mddev->queue) {
548     blk_queue_max_discard_sectors(mddev->queue,
549     mddev->chunk_sectors);
550     + blk_queue_max_write_same_sectors(mddev->queue,
551     + mddev->chunk_sectors);
552     blk_queue_io_min(mddev->queue, chunk_size);
553     if (conf->geo.raid_disks % conf->geo.near_copies)
554     blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
555     diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
556     index 82c0616..6e3d6dc 100644
557     --- a/drivers/mtd/mtdchar.c
558     +++ b/drivers/mtd/mtdchar.c
559     @@ -1159,45 +1159,17 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
560     struct mtd_file_info *mfi = file->private_data;
561     struct mtd_info *mtd = mfi->mtd;
562     struct map_info *map = mtd->priv;
563     - resource_size_t start, off;
564     - unsigned long len, vma_len;
565    
566     /* This is broken because it assumes the MTD device is map-based
567     and that mtd->priv is a valid struct map_info. It should be
568     replaced with something that uses the mtd_get_unmapped_area()
569     operation properly. */
570     if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
571     - off = get_vm_offset(vma);
572     - start = map->phys;
573     - len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
574     - start &= PAGE_MASK;
575     - vma_len = get_vm_size(vma);
576     -
577     - /* Overflow in off+len? */
578     - if (vma_len + off < off)
579     - return -EINVAL;
580     - /* Does it fit in the mapping? */
581     - if (vma_len + off > len)
582     - return -EINVAL;
583     -
584     - off += start;
585     - /* Did that overflow? */
586     - if (off < start)
587     - return -EINVAL;
588     - if (set_vm_offset(vma, off) < 0)
589     - return -EINVAL;
590     - vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
591     -
592     #ifdef pgprot_noncached
593     - if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
594     + if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
595     vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
596     #endif
597     - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
598     - vma->vm_end - vma->vm_start,
599     - vma->vm_page_prot))
600     - return -EAGAIN;
601     -
602     - return 0;
603     + return vm_iomap_memory(vma, map->phys, map->size);
604     }
605     return -ENOSYS;
606     #else
607     diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
608     index 5eaf47b..42b6d69 100644
609     --- a/drivers/net/can/mcp251x.c
610     +++ b/drivers/net/can/mcp251x.c
611     @@ -922,6 +922,7 @@ static int mcp251x_open(struct net_device *net)
612     struct mcp251x_priv *priv = netdev_priv(net);
613     struct spi_device *spi = priv->spi;
614     struct mcp251x_platform_data *pdata = spi->dev.platform_data;
615     + unsigned long flags;
616     int ret;
617    
618     ret = open_candev(net);
619     @@ -938,9 +939,14 @@ static int mcp251x_open(struct net_device *net)
620     priv->tx_skb = NULL;
621     priv->tx_len = 0;
622    
623     + flags = IRQF_ONESHOT;
624     + if (pdata->irq_flags)
625     + flags |= pdata->irq_flags;
626     + else
627     + flags |= IRQF_TRIGGER_FALLING;
628     +
629     ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
630     - pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
631     - DEVICE_NAME, priv);
632     + flags, DEVICE_NAME, priv);
633     if (ret) {
634     dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
635     if (pdata->transceiver_enable)
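
The rewritten call above always ORs in IRQF_ONESHOT, which the IRQ core requires when only a threaded handler and no hardirq handler is registered. A minimal sketch of that calling convention, with a hypothetical device name:

    /* Threaded-only handler: the hardirq handler is NULL, so IRQF_ONESHOT
     * keeps the line masked until the thread finishes, avoiding an
     * interrupt storm on level-triggered lines.
     */
    ret = request_threaded_irq(spi->irq, NULL, foo_irq_thread,
                               IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                               "foo", priv);
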
636     diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
637     index 6433b81..8e0c4a0 100644
638     --- a/drivers/net/can/sja1000/sja1000_of_platform.c
639     +++ b/drivers/net/can/sja1000/sja1000_of_platform.c
640     @@ -96,8 +96,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
641     struct net_device *dev;
642     struct sja1000_priv *priv;
643     struct resource res;
644     - const u32 *prop;
645     - int err, irq, res_size, prop_size;
646     + u32 prop;
647     + int err, irq, res_size;
648     void __iomem *base;
649    
650     err = of_address_to_resource(np, 0, &res);
651     @@ -138,27 +138,27 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
652     priv->read_reg = sja1000_ofp_read_reg;
653     priv->write_reg = sja1000_ofp_write_reg;
654    
655     - prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size);
656     - if (prop && (prop_size == sizeof(u32)))
657     - priv->can.clock.freq = *prop / 2;
658     + err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
659     + if (!err)
660     + priv->can.clock.freq = prop / 2;
661     else
662     priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
663    
664     - prop = of_get_property(np, "nxp,tx-output-mode", &prop_size);
665     - if (prop && (prop_size == sizeof(u32)))
666     - priv->ocr |= *prop & OCR_MODE_MASK;
667     + err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
668     + if (!err)
669     + priv->ocr |= prop & OCR_MODE_MASK;
670     else
671     priv->ocr |= OCR_MODE_NORMAL; /* default */
672    
673     - prop = of_get_property(np, "nxp,tx-output-config", &prop_size);
674     - if (prop && (prop_size == sizeof(u32)))
675     - priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK;
676     + err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
677     + if (!err)
678     + priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
679     else
680     priv->ocr |= OCR_TX0_PULLDOWN; /* default */
681    
682     - prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size);
683     - if (prop && (prop_size == sizeof(u32)) && *prop) {
684     - u32 divider = priv->can.clock.freq * 2 / *prop;
685     + err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
686     + if (!err && prop) {
687     + u32 divider = priv->can.clock.freq * 2 / prop;
688    
689     if (divider > 1)
690     priv->cdr |= divider / 2 - 1;
691     @@ -168,8 +168,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
692     priv->cdr |= CDR_CLK_OFF; /* default */
693     }
694    
695     - prop = of_get_property(np, "nxp,no-comparator-bypass", NULL);
696     - if (!prop)
697     + if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
698     priv->cdr |= CDR_CBP; /* default */
699    
700     priv->irq_flags = IRQF_SHARED;
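
The conversions above replace open-coded of_get_property() plus prop_size checks with of_property_read_u32(), which returns 0 only when the property exists and has the right size. A sketch of the idiom with a driver default (property and macro names purely illustrative):

    u32 freq;

    /* A non-zero return covers both a missing property and a
     * wrong-sized one, so a single test selects the fallback. */
    if (of_property_read_u32(np, "clock-frequency", &freq))
            freq = FOO_DEFAULT_CLOCK;
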
701     diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
702     index 8a5253c..6917998 100644
703     --- a/drivers/net/ethernet/broadcom/tg3.c
704     +++ b/drivers/net/ethernet/broadcom/tg3.c
705     @@ -330,6 +330,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
706     {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
707     {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
708     {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
709     + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
710     {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
711     {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
712     {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
713     @@ -9103,7 +9104,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
714     }
715    
716     if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
717     - u32 grc_mode = tr32(GRC_MODE);
718     + u32 grc_mode;
719     +
720     + /* Fix transmit hangs */
721     + val = tr32(TG3_CPMU_PADRNG_CTL);
722     + val |= TG3_CPMU_PADRNG_CTL_RDIV2;
723     + tw32(TG3_CPMU_PADRNG_CTL, val);
724     +
725     + grc_mode = tr32(GRC_MODE);
726    
727     /* Access the lower 1K of DL PCIE block registers. */
728     val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
729     @@ -9413,6 +9421,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
730     if (tg3_flag(tp, PCI_EXPRESS))
731     rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
732    
733     + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
734     + tp->dma_limit = 0;
735     + if (tp->dev->mtu <= ETH_DATA_LEN) {
736     + rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
737     + tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
738     + }
739     + }
740     +
741     if (tg3_flag(tp, HW_TSO_1) ||
742     tg3_flag(tp, HW_TSO_2) ||
743     tg3_flag(tp, HW_TSO_3))
744     diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
745     index d330e81..6f9b74c 100644
746     --- a/drivers/net/ethernet/broadcom/tg3.h
747     +++ b/drivers/net/ethernet/broadcom/tg3.h
748     @@ -1159,6 +1159,8 @@
749     #define CPMU_MUTEX_GNT_DRIVER 0x00001000
750     #define TG3_CPMU_PHY_STRAP 0x00003664
751     #define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020
752     +#define TG3_CPMU_PADRNG_CTL 0x00003668
753     +#define TG3_CPMU_PADRNG_CTL_RDIV2 0x00040000
754     /* 0x3664 --> 0x36b0 unused */
755    
756     #define TG3_CPMU_EEE_MODE 0x000036b0
757     diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
758     index 6e1915a..c00c13a 100644
759     --- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
760     +++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
761     @@ -519,7 +519,7 @@ static const u32 ar9580_1p0_mac_core[][2] = {
762     {0x00008258, 0x00000000},
763     {0x0000825c, 0x40000000},
764     {0x00008260, 0x00080922},
765     - {0x00008264, 0x9bc00010},
766     + {0x00008264, 0x9d400010},
767     {0x00008268, 0xffffffff},
768     {0x0000826c, 0x0000ffff},
769     {0x00008270, 0x00000000},
770     diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
771     index 05d5ba6..0663653 100644
772     --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
773     +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
774     @@ -796,7 +796,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
775     * required version.
776     */
777     if (priv->fw_version_major != MAJOR_VERSION_REQ ||
778     - priv->fw_version_minor != MINOR_VERSION_REQ) {
779     + priv->fw_version_minor < MINOR_VERSION_REQ) {
780     dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n",
781     MAJOR_VERSION_REQ, MINOR_VERSION_REQ);
782     return -EINVAL;
783     diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
784     index e8486c1..b70f220 100644
785     --- a/drivers/net/wireless/b43/phy_n.c
786     +++ b/drivers/net/wireless/b43/phy_n.c
787     @@ -5165,7 +5165,8 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
788     #endif
789     #ifdef CONFIG_B43_SSB
790     case B43_BUS_SSB:
791     - /* FIXME */
792     + ssb_pmu_spuravoid_pllupdate(&dev->dev->sdev->bus->chipco,
793     + avoid);
794     break;
795     #endif
796     }
797     diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
798     index a43415a..bc75528 100644
799     --- a/drivers/ssb/driver_chipcommon_pmu.c
800     +++ b/drivers/ssb/driver_chipcommon_pmu.c
801     @@ -675,3 +675,32 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc)
802     return 0;
803     }
804     }
805     +
806     +void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid)
807     +{
808     + u32 pmu_ctl = 0;
809     +
810     + switch (cc->dev->bus->chip_id) {
811     + case 0x4322:
812     + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11100070);
813     + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x1014140a);
814     + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888854);
815     + if (spuravoid == 1)
816     + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05201828);
817     + else
818     + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05001828);
819     + pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD;
820     + break;
821     + case 43222:
822     + /* TODO: BCM43222 requires updating PLLs too */
823     + return;
824     + default:
825     + ssb_printk(KERN_ERR PFX
826     + "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
827     + cc->dev->bus->chip_id);
828     + return;
829     + }
830     +
831     + chipco_set32(cc, SSB_CHIPCO_PMU_CTL, pmu_ctl);
832     +}
833     +EXPORT_SYMBOL_GPL(ssb_pmu_spuravoid_pllupdate);
834     diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
835     index dc61c12..0a49456 100644
836     --- a/drivers/video/fbmem.c
837     +++ b/drivers/video/fbmem.c
838     @@ -1373,15 +1373,12 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
839     {
840     struct fb_info *info = file_fb_info(file);
841     struct fb_ops *fb;
842     - unsigned long off;
843     + unsigned long mmio_pgoff;
844     unsigned long start;
845     u32 len;
846    
847     if (!info)
848     return -ENODEV;
849     - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
850     - return -EINVAL;
851     - off = vma->vm_pgoff << PAGE_SHIFT;
852     fb = info->fbops;
853     if (!fb)
854     return -ENODEV;
855     @@ -1393,32 +1390,24 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
856     return res;
857     }
858    
859     - /* frame buffer memory */
860     + /*
861     + * Ugh. This can be either the frame buffer mapping, or
862     + * if pgoff points past it, the mmio mapping.
863     + */
864     start = info->fix.smem_start;
865     - len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
866     - if (off >= len) {
867     - /* memory mapped io */
868     - off -= len;
869     - if (info->var.accel_flags) {
870     - mutex_unlock(&info->mm_lock);
871     - return -EINVAL;
872     - }
873     + len = info->fix.smem_len;
874     + mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
875     + if (vma->vm_pgoff >= mmio_pgoff) {
876     + vma->vm_pgoff -= mmio_pgoff;
877     start = info->fix.mmio_start;
878     - len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
879     + len = info->fix.mmio_len;
880     }
881     mutex_unlock(&info->mm_lock);
882     - start &= PAGE_MASK;
883     - if ((vma->vm_end - vma->vm_start + off) > len)
884     - return -EINVAL;
885     - off += start;
886     - vma->vm_pgoff = off >> PAGE_SHIFT;
887     - /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range()*/
888     +
889     vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
890     - fb_pgprotect(file, vma, off);
891     - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
892     - vma->vm_end - vma->vm_start, vma->vm_page_prot))
893     - return -EAGAIN;
894     - return 0;
895     + fb_pgprotect(file, vma, start);
896     +
897     + return vm_iomap_memory(vma, start, len);
898     }
899    
900     static int
901     diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
902     index 0c42cdb..5843a47 100644
903     --- a/fs/binfmt_elf.c
904     +++ b/fs/binfmt_elf.c
905     @@ -1132,6 +1132,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
906     goto whole;
907     if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
908     goto whole;
909     + return 0;
910     }
911    
912     /* Do not dump I/O mapped devices or special mappings */
913     diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
914     index 744a69b..8a00e2f 100644
915     --- a/fs/btrfs/tree-log.c
916     +++ b/fs/btrfs/tree-log.c
917     @@ -318,6 +318,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
918     unsigned long src_ptr;
919     unsigned long dst_ptr;
920     int overwrite_root = 0;
921     + bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
922    
923     if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
924     overwrite_root = 1;
925     @@ -327,6 +328,9 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
926    
927     /* look for the key in the destination tree */
928     ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
929     + if (ret < 0)
930     + return ret;
931     +
932     if (ret == 0) {
933     char *src_copy;
934     char *dst_copy;
935     @@ -368,6 +372,30 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
936     return 0;
937     }
938    
939     + /*
940     + * We need to load the old nbytes into the inode so when we
941     + * replay the extents we've logged we get the right nbytes.
942     + */
943     + if (inode_item) {
944     + struct btrfs_inode_item *item;
945     + u64 nbytes;
946     +
947     + item = btrfs_item_ptr(path->nodes[0], path->slots[0],
948     + struct btrfs_inode_item);
949     + nbytes = btrfs_inode_nbytes(path->nodes[0], item);
950     + item = btrfs_item_ptr(eb, slot,
951     + struct btrfs_inode_item);
952     + btrfs_set_inode_nbytes(eb, item, nbytes);
953     + }
954     + } else if (inode_item) {
955     + struct btrfs_inode_item *item;
956     +
957     + /*
958     + * New inode, set nbytes to 0 so that the nbytes comes out
959     + * properly when we replay the extents.
960     + */
961     + item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
962     + btrfs_set_inode_nbytes(eb, item, 0);
963     }
964     insert:
965     btrfs_release_path(path);
966     @@ -488,7 +516,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
967     u64 mask = root->sectorsize - 1;
968     u64 extent_end;
969     u64 start = key->offset;
970     - u64 saved_nbytes;
971     + u64 nbytes = 0;
972     struct btrfs_file_extent_item *item;
973     struct inode *inode = NULL;
974     unsigned long size;
975     @@ -498,10 +526,19 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
976     found_type = btrfs_file_extent_type(eb, item);
977    
978     if (found_type == BTRFS_FILE_EXTENT_REG ||
979     - found_type == BTRFS_FILE_EXTENT_PREALLOC)
980     - extent_end = start + btrfs_file_extent_num_bytes(eb, item);
981     - else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
982     + found_type == BTRFS_FILE_EXTENT_PREALLOC) {
983     + nbytes = btrfs_file_extent_num_bytes(eb, item);
984     + extent_end = start + nbytes;
985     +
986     + /*
987     + * We don't add to the inodes nbytes if we are prealloc or a
988     + * hole.
989     + */
990     + if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
991     + nbytes = 0;
992     + } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
993     size = btrfs_file_extent_inline_len(eb, item);
994     + nbytes = btrfs_file_extent_ram_bytes(eb, item);
995     extent_end = (start + size + mask) & ~mask;
996     } else {
997     ret = 0;
998     @@ -550,7 +587,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
999     }
1000     btrfs_release_path(path);
1001    
1002     - saved_nbytes = inode_get_bytes(inode);
1003     /* drop any overlapping extents */
1004     ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
1005     BUG_ON(ret);
1006     @@ -637,7 +673,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
1007     BUG_ON(ret);
1008     }
1009    
1010     - inode_set_bytes(inode, saved_nbytes);
1011     + inode_add_bytes(inode, nbytes);
1012     ret = btrfs_update_inode(trans, root, inode);
1013     out:
1014     if (inode)
1015     diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
1016     index eba76ea..fc8ddc1 100644
1017     --- a/fs/hfsplus/extents.c
1018     +++ b/fs/hfsplus/extents.c
1019     @@ -533,7 +533,7 @@ void hfsplus_file_truncate(struct inode *inode)
1020     struct address_space *mapping = inode->i_mapping;
1021     struct page *page;
1022     void *fsdata;
1023     - u32 size = inode->i_size;
1024     + loff_t size = inode->i_size;
1025    
1026     res = pagecache_write_begin(NULL, mapping, size, 0,
1027     AOP_FLAG_UNINTERRUPTIBLE,
1028     diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
1029     index 78bde32..ccee8cc 100644
1030     --- a/fs/hugetlbfs/inode.c
1031     +++ b/fs/hugetlbfs/inode.c
1032     @@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
1033     * way when do_mmap_pgoff unwinds (may be important on powerpc
1034     * and ia64).
1035     */
1036     - vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP;
1037     + vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
1038     vma->vm_ops = &hugetlb_vm_ops;
1039    
1040     if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
1041     diff --git a/fs/proc/array.c b/fs/proc/array.c
1042     index 6a91e6f..be3c22f 100644
1043     --- a/fs/proc/array.c
1044     +++ b/fs/proc/array.c
1045     @@ -143,6 +143,7 @@ static const char * const task_state_array[] = {
1046     "x (dead)", /* 64 */
1047     "K (wakekill)", /* 128 */
1048     "W (waking)", /* 256 */
1049     + "P (parked)", /* 512 */
1050     };
1051    
1052     static inline const char *get_task_state(struct task_struct *tsk)
1053     diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
1054     index 2c497ab..ffdf8b7 100644
1055     --- a/include/linux/kvm_host.h
1056     +++ b/include/linux/kvm_host.h
1057     @@ -511,7 +511,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1058     int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1059     void *data, unsigned long len);
1060     int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1061     - gpa_t gpa);
1062     + gpa_t gpa, unsigned long len);
1063     int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
1064     int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
1065     struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
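
The extra len argument lets the cache record how large the registered guest area is, so kvm_read_guest_cached()/kvm_write_guest_cached() can stay within bounds. A sketch of the resulting usage, mirroring the kvmclock conversion earlier in this patch:

    /* Register the guest physical area once, declaring its size... */
    if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_time, gpa,
                                  sizeof(struct pvclock_vcpu_time_info)))
            vcpu->arch.pv_time_enabled = false;

    /* ...then access it later without re-translating the gpa. */
    kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_time,
                           &vcpu->arch.hv_clock,
                           sizeof(vcpu->arch.hv_clock));
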
1066     diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
1067     index fa7cc72..b0bcce0 100644
1068     --- a/include/linux/kvm_types.h
1069     +++ b/include/linux/kvm_types.h
1070     @@ -71,6 +71,7 @@ struct gfn_to_hva_cache {
1071     u64 generation;
1072     gpa_t gpa;
1073     unsigned long hva;
1074     + unsigned long len;
1075     struct kvm_memory_slot *memslot;
1076     };
1077    
1078     diff --git a/include/linux/mm.h b/include/linux/mm.h
1079     index 66e2f7c..9568b90 100644
1080     --- a/include/linux/mm.h
1081     +++ b/include/linux/mm.h
1082     @@ -1623,6 +1623,8 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1083     unsigned long pfn);
1084     int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1085     unsigned long pfn);
1086     +int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
1087     +
1088    
1089     struct page *follow_page(struct vm_area_struct *, unsigned long address,
1090     unsigned int foll_flags);
1091     diff --git a/include/linux/sched.h b/include/linux/sched.h
1092     index d211247..7e49270 100644
1093     --- a/include/linux/sched.h
1094     +++ b/include/linux/sched.h
1095     @@ -163,9 +163,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
1096     #define TASK_DEAD 64
1097     #define TASK_WAKEKILL 128
1098     #define TASK_WAKING 256
1099     -#define TASK_STATE_MAX 512
1100     +#define TASK_PARKED 512
1101     +#define TASK_STATE_MAX 1024
1102    
1103     -#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
1104     +#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
1105    
1106     extern char ___assert_task_state[1 - 2*!!(
1107     sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
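
The build-time assert above still balances after this change: with TASK_STATE_MAX now 1024, ilog2(1024) + 1 = 11, which matches the eleven state characters in "RSDTtZXxKWP", so adding the "P" state without bumping TASK_STATE_MAX (or vice versa) would fail the compile.
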
1108     diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h
1109     index 9e492be..6fcfe99 100644
1110     --- a/include/linux/ssb/ssb_driver_chipcommon.h
1111     +++ b/include/linux/ssb/ssb_driver_chipcommon.h
1112     @@ -219,6 +219,7 @@
1113     #define SSB_CHIPCO_PMU_CTL 0x0600 /* PMU control */
1114     #define SSB_CHIPCO_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */
1115     #define SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT 16
1116     +#define SSB_CHIPCO_PMU_CTL_PLL_UPD 0x00000400
1117     #define SSB_CHIPCO_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */
1118     #define SSB_CHIPCO_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */
1119     #define SSB_CHIPCO_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */
1120     @@ -667,5 +668,6 @@ enum ssb_pmu_ldo_volt_id {
1121     void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc,
1122     enum ssb_pmu_ldo_volt_id id, u32 voltage);
1123     void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on);
1124     +void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid);
1125    
1126     #endif /* LINUX_SSB_CHIPCO_H_ */
1127     diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
1128     index 5a8671e..e5586ca 100644
1129     --- a/include/trace/events/sched.h
1130     +++ b/include/trace/events/sched.h
1131     @@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch,
1132     __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
1133     { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
1134     { 16, "Z" }, { 32, "X" }, { 64, "x" },
1135     - { 128, "W" }) : "R",
1136     + { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
1137     __entry->prev_state & TASK_STATE_MAX ? "+" : "",
1138     __entry->next_comm, __entry->next_pid, __entry->next_prio)
1139     );
1140     diff --git a/kernel/events/core.c b/kernel/events/core.c
1141     index 7b6646a..0600d3b 100644
1142     --- a/kernel/events/core.c
1143     +++ b/kernel/events/core.c
1144     @@ -5328,7 +5328,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
1145    
1146     static int perf_swevent_init(struct perf_event *event)
1147     {
1148     - int event_id = event->attr.config;
1149     + u64 event_id = event->attr.config;
1150    
1151     if (event->attr.type != PERF_TYPE_SOFTWARE)
1152     return -ENOENT;
1153     diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
1154     index cdd5607..e4cee8d 100644
1155     --- a/kernel/hrtimer.c
1156     +++ b/kernel/hrtimer.c
1157     @@ -61,6 +61,7 @@
1158     DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
1159     {
1160    
1161     + .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
1162     .clock_base =
1163     {
1164     {
1165     @@ -1640,8 +1641,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
1166     struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
1167     int i;
1168    
1169     - raw_spin_lock_init(&cpu_base->lock);
1170     -
1171     for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1172     cpu_base->clock_base[i].cpu_base = cpu_base;
1173     timerqueue_init_head(&cpu_base->clock_base[i].active);
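
The hunk above initializes the per-CPU base lock statically and drops the runtime raw_spin_lock_init(), so bringing a CPU up again cannot reinitialize a lock that may already be in use. The general pattern, with hypothetical names:

    struct foo_base {
            raw_spinlock_t lock;
            /* ... */
    };

    /* Lock is valid from boot; no per-CPU runtime init needed. */
    static DEFINE_PER_CPU(struct foo_base, foo_bases) = {
            .lock = __RAW_SPIN_LOCK_UNLOCKED(foo_bases.lock),
    };
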
1174     diff --git a/kernel/kthread.c b/kernel/kthread.c
1175     index 691dc2e..9eb7fed 100644
1176     --- a/kernel/kthread.c
1177     +++ b/kernel/kthread.c
1178     @@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task)
1179    
1180     static void __kthread_parkme(struct kthread *self)
1181     {
1182     - __set_current_state(TASK_INTERRUPTIBLE);
1183     + __set_current_state(TASK_PARKED);
1184     while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
1185     if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
1186     complete(&self->parked);
1187     schedule();
1188     - __set_current_state(TASK_INTERRUPTIBLE);
1189     + __set_current_state(TASK_PARKED);
1190     }
1191     clear_bit(KTHREAD_IS_PARKED, &self->flags);
1192     __set_current_state(TASK_RUNNING);
1193     @@ -256,8 +256,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
1194     }
1195     EXPORT_SYMBOL(kthread_create_on_node);
1196    
1197     -static void __kthread_bind(struct task_struct *p, unsigned int cpu)
1198     +static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
1199     {
1200     + /* Must have done schedule() in kthread() before we set_task_cpu */
1201     + if (!wait_task_inactive(p, state)) {
1202     + WARN_ON(1);
1203     + return;
1204     + }
1205     /* It's safe because the task is inactive. */
1206     do_set_cpus_allowed(p, cpumask_of(cpu));
1207     p->flags |= PF_THREAD_BOUND;
1208     @@ -274,12 +279,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu)
1209     */
1210     void kthread_bind(struct task_struct *p, unsigned int cpu)
1211     {
1212     - /* Must have done schedule() in kthread() before we set_task_cpu */
1213     - if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
1214     - WARN_ON(1);
1215     - return;
1216     - }
1217     - __kthread_bind(p, cpu);
1218     + __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
1219     }
1220     EXPORT_SYMBOL(kthread_bind);
1221    
1222     @@ -324,6 +324,22 @@ static struct kthread *task_get_live_kthread(struct task_struct *k)
1223     return NULL;
1224     }
1225    
1226     +static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
1227     +{
1228     + clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
1229     + /*
1230     + * We clear the IS_PARKED bit here as we don't wait
1231     + * until the task has left the park code. So if we'd
1232     + * park before that happens we'd see the IS_PARKED bit
1233     + * which might be about to be cleared.
1234     + */
1235     + if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
1236     + if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
1237     + __kthread_bind(k, kthread->cpu, TASK_PARKED);
1238     + wake_up_state(k, TASK_PARKED);
1239     + }
1240     +}
1241     +
1242     /**
1243     * kthread_unpark - unpark a thread created by kthread_create().
1244     * @k: thread created by kthread_create().
1245     @@ -336,20 +352,8 @@ void kthread_unpark(struct task_struct *k)
1246     {
1247     struct kthread *kthread = task_get_live_kthread(k);
1248    
1249     - if (kthread) {
1250     - clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
1251     - /*
1252     - * We clear the IS_PARKED bit here as we don't wait
1253     - * until the task has left the park code. So if we'd
1254     - * park before that happens we'd see the IS_PARKED bit
1255     - * which might be about to be cleared.
1256     - */
1257     - if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
1258     - if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
1259     - __kthread_bind(k, kthread->cpu);
1260     - wake_up_process(k);
1261     - }
1262     - }
1263     + if (kthread)
1264     + __kthread_unpark(k, kthread);
1265     put_task_struct(k);
1266     }
1267    
1268     @@ -407,7 +411,7 @@ int kthread_stop(struct task_struct *k)
1269     trace_sched_kthread_stop(k);
1270     if (kthread) {
1271     set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
1272     - clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
1273     + __kthread_unpark(k, kthread);
1274     wake_up_process(k);
1275     wait_for_completion(&kthread->exited);
1276     }
1277     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
1278     index 26058d0..5e2f7c3 100644
1279     --- a/kernel/sched/core.c
1280     +++ b/kernel/sched/core.c
1281     @@ -1488,8 +1488,10 @@ static void try_to_wake_up_local(struct task_struct *p)
1282     {
1283     struct rq *rq = task_rq(p);
1284    
1285     - BUG_ON(rq != this_rq());
1286     - BUG_ON(p == current);
1287     + if (WARN_ON_ONCE(rq != this_rq()) ||
1288     + WARN_ON_ONCE(p == current))
1289     + return;
1290     +
1291     lockdep_assert_held(&rq->lock);
1292    
1293     if (!raw_spin_trylock(&p->pi_lock)) {
1294     @@ -4948,7 +4950,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
1295     }
1296    
1297     static int min_load_idx = 0;
1298     -static int max_load_idx = CPU_LOAD_IDX_MAX;
1299     +static int max_load_idx = CPU_LOAD_IDX_MAX-1;
1300    
1301     static void
1302     set_table_entry(struct ctl_table *entry,
1303     diff --git a/kernel/signal.c b/kernel/signal.c
1304     index dec9c30..50e425c 100644
1305     --- a/kernel/signal.c
1306     +++ b/kernel/signal.c
1307     @@ -2880,7 +2880,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
1308    
1309     static int do_tkill(pid_t tgid, pid_t pid, int sig)
1310     {
1311     - struct siginfo info;
1312     + struct siginfo info = {};
1313    
1314     info.si_signo = sig;
1315     info.si_errno = 0;
1316     diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
1317     index f45e128..f359dc7 100644
1318     --- a/kernel/user_namespace.c
1319     +++ b/kernel/user_namespace.c
1320     @@ -25,7 +25,8 @@
1321    
1322     static struct kmem_cache *user_ns_cachep __read_mostly;
1323    
1324     -static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
1325     +static bool new_idmap_permitted(const struct file *file,
1326     + struct user_namespace *ns, int cap_setid,
1327     struct uid_gid_map *map);
1328    
1329     static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
1330     @@ -575,10 +576,10 @@ static ssize_t map_write(struct file *file, const char __user *buf,
1331     if (map->nr_extents != 0)
1332     goto out;
1333    
1334     - /* Require the appropriate privilege CAP_SETUID or CAP_SETGID
1335     - * over the user namespace in order to set the id mapping.
1336     + /*
1337     + * Adjusting namespace settings requires capabilities on the target.
1338     */
1339     - if (cap_valid(cap_setid) && !ns_capable(ns, cap_setid))
1340     + if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
1341     goto out;
1342    
1343     /* Get a buffer */
1344     @@ -666,7 +667,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
1345    
1346     ret = -EPERM;
1347     /* Validate the user is allowed to use user id's mapped to. */
1348     - if (!new_idmap_permitted(ns, cap_setid, &new_map))
1349     + if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
1350     goto out;
1351    
1352     /* Map the lower ids from the parent user namespace to the
1353     @@ -753,7 +754,8 @@ ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t
1354     &ns->projid_map, &ns->parent->projid_map);
1355     }
1356    
1357     -static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
1358     +static bool new_idmap_permitted(const struct file *file,
1359     + struct user_namespace *ns, int cap_setid,
1360     struct uid_gid_map *new_map)
1361     {
1362     /* Allow mapping to your own filesystem ids */
1363     @@ -761,12 +763,12 @@ static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
1364     u32 id = new_map->extent[0].lower_first;
1365     if (cap_setid == CAP_SETUID) {
1366     kuid_t uid = make_kuid(ns->parent, id);
1367     - if (uid_eq(uid, current_fsuid()))
1368     + if (uid_eq(uid, file->f_cred->fsuid))
1369     return true;
1370     }
1371     else if (cap_setid == CAP_SETGID) {
1372     kgid_t gid = make_kgid(ns->parent, id);
1373     - if (gid_eq(gid, current_fsgid()))
1374     + if (gid_eq(gid, file->f_cred->fsgid))
1375     return true;
1376     }
1377     }
1378     @@ -777,8 +779,10 @@ static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
1379    
1380     /* Allow the specified ids if we have the appropriate capability
1381     * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
1382     + * And the opener of the id file also had the appropriate capability.
1383     */
1384     - if (ns_capable(ns->parent, cap_setid))
1385     + if (ns_capable(ns->parent, cap_setid) &&
1386     + file_ns_capable(file, ns->parent, cap_setid))
1387     return true;
1388    
1389     return false;
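The combined check makes two credential sets matter: those of the task writing the mapping (ns_capable(), checked against current) and those of whoever opened the map file (file_ns_capable(), checked against file->f_cred, captured at open time). This defeats a confused-deputy attack, sketched below in comments around the check pattern from the patch:

    /*
     * Scenario the opener check defeats:
     *   attacker (no caps):  opens /proc/<pid>/uid_map for writing
     *   attacker:            hands the descriptor to a privileged helper
     *   helper (CAP_SETUID): writes a hostile mapping through it
     * ns_capable() alone would pass, because the writer is privileged;
     * file_ns_capable() also vets the unprivileged opener and denies it.
     */
    if (ns_capable(ns->parent, cap_setid) &&
        file_ns_capable(file, ns->parent, cap_setid))
            return true;

    return false;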
1390     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1391     index d7cec92..88eb939 100644
1392     --- a/mm/hugetlb.c
1393     +++ b/mm/hugetlb.c
1394     @@ -2965,7 +2965,17 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
1395     break;
1396     }
1397    
1398     - if (absent ||
1399     + /*
1400     + * We need to call hugetlb_fault for both hugepages under migration
1401     + * (in which case hugetlb_fault waits for the migration) and
1402     + * hwpoisoned hugepages (in which case we need to prevent the
1403     + * caller from accessing them). To do this, we use is_swap_pte
1404     + * here instead of is_hugetlb_entry_migration and
1405     + * is_hugetlb_entry_hwpoisoned: it covers both cases, and we
1406     + * cannot follow correct pages directly from any kind of swap
1407     + * entry anyway.
1408     + */
1409     + if (absent || is_swap_pte(huge_ptep_get(pte)) ||
1410     ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
1411     int ret;
1412    
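Background for the new test: migration entries and hwpoison entries are both encoded as swap-type PTEs, which is why the single is_swap_pte() check subsumes the two specific predicates. Expanded into its components with the <linux/swapops.h> helpers (a fragment for illustration, not the patched code; pte comes from the surrounding loop):

    pte_t entry = huge_ptep_get(pte);

    if (is_swap_pte(entry)) {
            swp_entry_t swp = pte_to_swp_entry(entry);

            if (is_migration_entry(swp)) {
                    /* hugetlb_fault() will wait for the migration */
            } else if (is_hwpoison_entry(swp)) {
                    /* hugetlb_fault() keeps the caller off the page */
            }
    }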
1413     diff --git a/mm/memory.c b/mm/memory.c
1414     index f8b734a..32a495a 100644
1415     --- a/mm/memory.c
1416     +++ b/mm/memory.c
1417     @@ -2358,6 +2358,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1418     }
1419     EXPORT_SYMBOL(remap_pfn_range);
1420    
1421     +/**
1422     + * vm_iomap_memory - remap memory to userspace
1423     + * @vma: user vma to map to
1424     + * @start: start of area
1425     + * @len: size of area
1426     + *
1427     + * This is a simplified io_remap_pfn_range() for common driver use. The
1428     + * driver just needs to give us the physical memory range to be mapped,
1429     + * we'll figure out the rest from the vma information.
1430     + *
1431     + * NOTE! Some drivers might want to tweak vma->vm_page_prot first to
1432     + * set up write-combining or similar.
1433     + */
1434     +int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1435     +{
1436     + unsigned long vm_len, pfn, pages;
1437     +
1438     + /* Check that the physical memory area passed in looks valid */
1439     + if (start + len < start)
1440     + return -EINVAL;
1441     + /*
1442     + * You *really* shouldn't map things that aren't page-aligned,
1443     + * but we've historically allowed it because IO memory might
1444     + * just have smaller alignment.
1445     + */
1446     + len += start & ~PAGE_MASK;
1447     + pfn = start >> PAGE_SHIFT;
1448     + pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
1449     + if (pfn + pages < pfn)
1450     + return -EINVAL;
1451     +
1452     + /* We start the mapping 'vm_pgoff' pages into the area */
1453     + if (vma->vm_pgoff > pages)
1454     + return -EINVAL;
1455     + pfn += vma->vm_pgoff;
1456     + pages -= vma->vm_pgoff;
1457     +
1458     + /* Can we fit all of the mapping? */
1459     + vm_len = vma->vm_end - vma->vm_start;
1460     + if (vm_len >> PAGE_SHIFT > pages)
1461     + return -EINVAL;
1462     +
1463     + /* Ok, let it rip */
1464     + return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1465     +}
1466     +EXPORT_SYMBOL(vm_iomap_memory);
1467     +
1468     static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
1469     unsigned long addr, unsigned long end,
1470     pte_fn_t fn, void *data)
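vm_iomap_memory() exists to collapse the boilerplate in driver mmap handlers. A minimal sketch of such a handler (foo_dev and its fields are hypothetical; only vm_iomap_memory() itself comes from the patch):

    #include <linux/fs.h>
    #include <linux/mm.h>

    struct foo_dev {
            phys_addr_t phys_base;  /* start of the device aperture */
            unsigned long size;     /* length of the aperture */
    };

    static int foo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct foo_dev *dev = file->private_data;

            /* optional: adjust caching before the call, per the NOTE above */
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

            /* offset, length and overflow checks are derived from the vma */
            return vm_iomap_memory(vma, dev->phys_base, dev->size);
    }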
1471     diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
1472     index e14e676..a1a7997 100644
1473     --- a/net/mac80211/mlme.c
1474     +++ b/net/mac80211/mlme.c
1475     @@ -3723,8 +3723,16 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
1476     /* prep auth_data so we don't go into idle on disassoc */
1477     ifmgd->auth_data = auth_data;
1478    
1479     - if (ifmgd->associated)
1480     - ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
1481     + if (ifmgd->associated) {
1482     + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
1483     +
1484     + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
1485     + WLAN_REASON_UNSPECIFIED,
1486     + false, frame_buf);
1487     +
1488     + __cfg80211_send_deauth(sdata->dev, frame_buf,
1489     + sizeof(frame_buf));
1490     + }
1491    
1492     sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
1493    
1494     @@ -3783,8 +3791,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
1495    
1496     mutex_lock(&ifmgd->mtx);
1497    
1498     - if (ifmgd->associated)
1499     - ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
1500     + if (ifmgd->associated) {
1501     + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
1502     +
1503     + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
1504     + WLAN_REASON_UNSPECIFIED,
1505     + false, frame_buf);
1506     +
1507     + __cfg80211_send_deauth(sdata->dev, frame_buf,
1508     + sizeof(frame_buf));
1509     + }
1510    
1511     if (ifmgd->auth_data && !ifmgd->auth_data->done) {
1512     err = -EBUSY;
1513     diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
1514     index 09b4286..f4aaf5a 100644
1515     --- a/sound/core/pcm_native.c
1516     +++ b/sound/core/pcm_native.c
1517     @@ -3222,18 +3222,10 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
1518     int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
1519     struct vm_area_struct *area)
1520     {
1521     - long size;
1522     - unsigned long offset;
1523     + struct snd_pcm_runtime *runtime = substream->runtime;
1524    
1525     area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
1526     - area->vm_flags |= VM_IO;
1527     - size = area->vm_end - area->vm_start;
1528     - offset = area->vm_pgoff << PAGE_SHIFT;
1529     - if (io_remap_pfn_range(area, area->vm_start,
1530     - (substream->runtime->dma_addr + offset) >> PAGE_SHIFT,
1531     - size, area->vm_page_prot))
1532     - return -EAGAIN;
1533     - return 0;
1534     + return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
1535     }
1536    
1537     EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
1538     diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
1539     index cfb7e4d..52058f0 100644
1540     --- a/virt/kvm/ioapic.c
1541     +++ b/virt/kvm/ioapic.c
1542     @@ -73,9 +73,12 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
1543     u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
1544     u64 redir_content;
1545    
1546     - ASSERT(redir_index < IOAPIC_NUM_PINS);
1547     + if (redir_index < IOAPIC_NUM_PINS)
1548     + redir_content =
1549     + ioapic->redirtbl[redir_index].bits;
1550     + else
1551     + redir_content = ~0ULL;
1552    
1553     - redir_content = ioapic->redirtbl[redir_index].bits;
1554     result = (ioapic->ioregsel & 0x1) ?
1555     (redir_content >> 32) & 0xffffffff :
1556     redir_content & 0xffffffff;
1557     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
1558     index 1cd693a..10afa34 100644
1559     --- a/virt/kvm/kvm_main.c
1560     +++ b/virt/kvm/kvm_main.c
1561     @@ -1476,21 +1476,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1562     }
1563    
1564     int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1565     - gpa_t gpa)
1566     + gpa_t gpa, unsigned long len)
1567     {
1568     struct kvm_memslots *slots = kvm_memslots(kvm);
1569     int offset = offset_in_page(gpa);
1570     - gfn_t gfn = gpa >> PAGE_SHIFT;
1571     + gfn_t start_gfn = gpa >> PAGE_SHIFT;
1572     + gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
1573     + gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
1574     + gfn_t nr_pages_avail;
1575    
1576     ghc->gpa = gpa;
1577     ghc->generation = slots->generation;
1578     - ghc->memslot = gfn_to_memslot(kvm, gfn);
1579     - ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
1580     - if (!kvm_is_error_hva(ghc->hva))
1581     + ghc->len = len;
1582     + ghc->memslot = gfn_to_memslot(kvm, start_gfn);
1583     + ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
1584     + if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
1585     ghc->hva += offset;
1586     - else
1587     - return -EFAULT;
1588     -
1589     + } else {
1590     + /*
1591     + * If the requested region crosses two memslots, we still
1592     + * verify that the entire region is valid here.
1593     + */
1594     + while (start_gfn <= end_gfn) {
1595     + ghc->memslot = gfn_to_memslot(kvm, start_gfn);
1596     + ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
1597     + &nr_pages_avail);
1598     + if (kvm_is_error_hva(ghc->hva))
1599     + return -EFAULT;
1600     + start_gfn += nr_pages_avail;
1601     + }
1602     + /* Use the slow path for cross-page reads and writes. */
1603     + ghc->memslot = NULL;
1604     + }
1605     return 0;
1606     }
1607     EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
1608     @@ -1501,8 +1518,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1609     struct kvm_memslots *slots = kvm_memslots(kvm);
1610     int r;
1611    
1612     + BUG_ON(len > ghc->len);
1613     +
1614     if (slots->generation != ghc->generation)
1615     - kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
1616     + kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
1617     +
1618     + if (unlikely(!ghc->memslot))
1619     + return kvm_write_guest(kvm, ghc->gpa, data, len);
1620    
1621     if (kvm_is_error_hva(ghc->hva))
1622     return -EFAULT;
1623     @@ -1522,8 +1544,13 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1624     struct kvm_memslots *slots = kvm_memslots(kvm);
1625     int r;
1626    
1627     + BUG_ON(len > ghc->len);
1628     +
1629     if (slots->generation != ghc->generation)
1630     - kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
1631     + kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
1632     +
1633     + if (unlikely(!ghc->memslot))
1634     + return kvm_read_guest(kvm, ghc->gpa, data, len);
1635    
1636     if (kvm_is_error_hva(ghc->hva))
1637     return -EFAULT;
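With the widened signature, callers register the region length once and the cached accessors police it afterwards. The intended calling pattern, as a sketch (my_state and record_state() are hypothetical; the kvm_* functions are those changed above):

    struct my_state {
            u64 flags;
            u64 seq;
    };

    static int record_state(struct kvm *kvm, gpa_t gpa, struct my_state *st)
    {
            struct gfn_to_hva_cache ghc;
            int r;

            /* register the full length; later accesses must not exceed it
             * (BUG_ON otherwise) */
            r = kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(*st));
            if (r)
                    return r;

            /* a region crossing memslots falls back to kvm_write_guest() */
            return kvm_write_guest_cached(kvm, &ghc, st, sizeof(*st));
    }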