Magellan Linux

Contents of /trunk/kernel-lts/patches-3.4/0141-3.4.42-all-fixes.patch

Parent Directory | Revision Log


Revision 2201 - (show annotations) (download)
Thu Jun 13 10:35:06 2013 UTC (10 years, 10 months ago) by niro
File size: 38265 byte(s)
-linux-3.4.42
1 diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
2 index 186c8cb..85d6332 100644
3 --- a/arch/arm/kernel/perf_event.c
4 +++ b/arch/arm/kernel/perf_event.c
5 @@ -319,7 +319,10 @@ validate_event(struct pmu_hw_events *hw_events,
6 struct hw_perf_event fake_event = event->hw;
7 struct pmu *leader_pmu = event->group_leader->pmu;
8
9 - if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
10 + if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
11 + return 1;
12 +
13 + if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
14 return 1;
15
16 return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
17 diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
18 index dd3d591..48bc3c0 100644
19 --- a/arch/arm/mm/cache-feroceon-l2.c
20 +++ b/arch/arm/mm/cache-feroceon-l2.c
21 @@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
22 outer_cache.inv_range = feroceon_l2_inv_range;
23 outer_cache.clean_range = feroceon_l2_clean_range;
24 outer_cache.flush_range = feroceon_l2_flush_range;
25 + outer_cache.inv_all = l2_inv_all;
26
27 enable_l2();
28
29 diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
30 index cb941ae..aeeb126 100644
31 --- a/arch/arm/mm/proc-arm920.S
32 +++ b/arch/arm/mm/proc-arm920.S
33 @@ -383,7 +383,7 @@ ENTRY(cpu_arm920_set_pte_ext)
34 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
35 .globl cpu_arm920_suspend_size
36 .equ cpu_arm920_suspend_size, 4 * 3
37 -#ifdef CONFIG_PM_SLEEP
38 +#ifdef CONFIG_ARM_CPU_SUSPEND
39 ENTRY(cpu_arm920_do_suspend)
40 stmfd sp!, {r4 - r6, lr}
41 mrc p15, 0, r4, c13, c0, 0 @ PID
42 diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
43 index 820259b..ee29dc4 100644
44 --- a/arch/arm/mm/proc-arm926.S
45 +++ b/arch/arm/mm/proc-arm926.S
46 @@ -398,7 +398,7 @@ ENTRY(cpu_arm926_set_pte_ext)
47 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
48 .globl cpu_arm926_suspend_size
49 .equ cpu_arm926_suspend_size, 4 * 3
50 -#ifdef CONFIG_PM_SLEEP
51 +#ifdef CONFIG_ARM_CPU_SUSPEND
52 ENTRY(cpu_arm926_do_suspend)
53 stmfd sp!, {r4 - r6, lr}
54 mrc p15, 0, r4, c13, c0, 0 @ PID
55 diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
56 index 3aa0da1..d92dfd0 100644
57 --- a/arch/arm/mm/proc-sa1100.S
58 +++ b/arch/arm/mm/proc-sa1100.S
59 @@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
60
61 .globl cpu_sa1100_suspend_size
62 .equ cpu_sa1100_suspend_size, 4 * 3
63 -#ifdef CONFIG_PM_SLEEP
64 +#ifdef CONFIG_ARM_CPU_SUSPEND
65 ENTRY(cpu_sa1100_do_suspend)
66 stmfd sp!, {r4 - r6, lr}
67 mrc p15, 0, r4, c3, c0, 0 @ domain ID
68 diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
69 index 5900cd5..897486c 100644
70 --- a/arch/arm/mm/proc-v6.S
71 +++ b/arch/arm/mm/proc-v6.S
72 @@ -132,7 +132,7 @@ ENTRY(cpu_v6_set_pte_ext)
73 /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
74 .globl cpu_v6_suspend_size
75 .equ cpu_v6_suspend_size, 4 * 6
76 -#ifdef CONFIG_PM_SLEEP
77 +#ifdef CONFIG_ARM_CPU_SUSPEND
78 ENTRY(cpu_v6_do_suspend)
79 stmfd sp!, {r4 - r9, lr}
80 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
81 diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
82 index b0d5786..a2d1e86 100644
83 --- a/arch/arm/mm/proc-xsc3.S
84 +++ b/arch/arm/mm/proc-xsc3.S
85 @@ -410,7 +410,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
86
87 .globl cpu_xsc3_suspend_size
88 .equ cpu_xsc3_suspend_size, 4 * 6
89 -#ifdef CONFIG_PM_SLEEP
90 +#ifdef CONFIG_ARM_CPU_SUSPEND
91 ENTRY(cpu_xsc3_do_suspend)
92 stmfd sp!, {r4 - r9, lr}
93 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
94 diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
95 index 4ffebaa..9882153 100644
96 --- a/arch/arm/mm/proc-xscale.S
97 +++ b/arch/arm/mm/proc-xscale.S
98 @@ -524,7 +524,7 @@ ENTRY(cpu_xscale_set_pte_ext)
99
100 .globl cpu_xscale_suspend_size
101 .equ cpu_xscale_suspend_size, 4 * 6
102 -#ifdef CONFIG_PM_SLEEP
103 +#ifdef CONFIG_ARM_CPU_SUSPEND
104 ENTRY(cpu_xscale_do_suspend)
105 stmfd sp!, {r4 - r9, lr}
106 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
107 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
108 index e216ba0..d57eacb 100644
109 --- a/arch/x86/include/asm/kvm_host.h
110 +++ b/arch/x86/include/asm/kvm_host.h
111 @@ -407,8 +407,8 @@ struct kvm_vcpu_arch {
112 gpa_t time;
113 struct pvclock_vcpu_time_info hv_clock;
114 unsigned int hw_tsc_khz;
115 - unsigned int time_offset;
116 - struct page *time_page;
117 + struct gfn_to_hva_cache pv_time;
118 + bool pv_time_enabled;
119
120 struct {
121 u64 msr_val;
122 diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
123 index 26b3e2f..268b245 100644
124 --- a/arch/x86/kernel/cpu/perf_event_intel.c
125 +++ b/arch/x86/kernel/cpu/perf_event_intel.c
126 @@ -126,8 +126,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
127 };
128
129 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
130 - INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
131 - INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
132 + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
133 + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
134 + EVENT_EXTRA_END
135 +};
136 +
137 +static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
138 + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
139 + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
140 EVENT_EXTRA_END
141 };
142
143 @@ -1851,7 +1857,10 @@ __init int intel_pmu_init(void)
144
145 x86_pmu.event_constraints = intel_snb_event_constraints;
146 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
147 - x86_pmu.extra_regs = intel_snb_extra_regs;
148 + if (boot_cpu_data.x86_model == 45)
149 + x86_pmu.extra_regs = intel_snbep_extra_regs;
150 + else
151 + x86_pmu.extra_regs = intel_snb_extra_regs;
152 /* all extra regs are per-cpu when HT is on */
153 x86_pmu.er_flags |= ERF_HAS_RSP_1;
154 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
155 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
156 index e28fb97..b27b452 100644
157 --- a/arch/x86/kvm/x86.c
158 +++ b/arch/x86/kvm/x86.c
159 @@ -1114,7 +1114,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
160 {
161 unsigned long flags;
162 struct kvm_vcpu_arch *vcpu = &v->arch;
163 - void *shared_kaddr;
164 unsigned long this_tsc_khz;
165 s64 kernel_ns, max_kernel_ns;
166 u64 tsc_timestamp;
167 @@ -1150,7 +1149,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
168
169 local_irq_restore(flags);
170
171 - if (!vcpu->time_page)
172 + if (!vcpu->pv_time_enabled)
173 return 0;
174
175 /*
176 @@ -1208,14 +1207,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
177 */
178 vcpu->hv_clock.version += 2;
179
180 - shared_kaddr = kmap_atomic(vcpu->time_page);
181 -
182 - memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
183 - sizeof(vcpu->hv_clock));
184 -
185 - kunmap_atomic(shared_kaddr);
186 -
187 - mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
188 + kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
189 + &vcpu->hv_clock,
190 + sizeof(vcpu->hv_clock));
191 return 0;
192 }
193
194 @@ -1494,7 +1488,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
195 return 0;
196 }
197
198 - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
199 + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
200 + sizeof(u32)))
201 return 1;
202
203 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
204 @@ -1504,10 +1499,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
205
206 static void kvmclock_reset(struct kvm_vcpu *vcpu)
207 {
208 - if (vcpu->arch.time_page) {
209 - kvm_release_page_dirty(vcpu->arch.time_page);
210 - vcpu->arch.time_page = NULL;
211 - }
212 + vcpu->arch.pv_time_enabled = false;
213 }
214
215 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
216 @@ -1602,6 +1594,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
217 break;
218 case MSR_KVM_SYSTEM_TIME_NEW:
219 case MSR_KVM_SYSTEM_TIME: {
220 + u64 gpa_offset;
221 kvmclock_reset(vcpu);
222
223 vcpu->arch.time = data;
224 @@ -1611,16 +1604,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
225 if (!(data & 1))
226 break;
227
228 - /* ...but clean it before doing the actual write */
229 - vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
230 + gpa_offset = data & ~(PAGE_MASK | 1);
231
232 - vcpu->arch.time_page =
233 - gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
234 -
235 - if (is_error_page(vcpu->arch.time_page)) {
236 - kvm_release_page_clean(vcpu->arch.time_page);
237 - vcpu->arch.time_page = NULL;
238 - }
239 + if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
240 + &vcpu->arch.pv_time, data & ~1ULL,
241 + sizeof(struct pvclock_vcpu_time_info)))
242 + vcpu->arch.pv_time_enabled = false;
243 + else
244 + vcpu->arch.pv_time_enabled = true;
245 break;
246 }
247 case MSR_KVM_ASYNC_PF_EN:
248 @@ -1636,7 +1627,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
249 return 1;
250
251 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
252 - data & KVM_STEAL_VALID_BITS))
253 + data & KVM_STEAL_VALID_BITS,
254 + sizeof(struct kvm_steal_time)))
255 return 1;
256
257 vcpu->arch.st.msr_val = data;
258 @@ -6167,6 +6159,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
259 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
260 goto fail_free_mce_banks;
261
262 + vcpu->arch.pv_time_enabled = false;
263 kvm_async_pf_hash_reset(vcpu);
264 kvm_pmu_init(vcpu);
265
266 diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
267 index ef5356c..0262210 100644
268 --- a/crypto/algif_hash.c
269 +++ b/crypto/algif_hash.c
270 @@ -161,6 +161,8 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
271 else if (len < ds)
272 msg->msg_flags |= MSG_TRUNC;
273
274 + msg->msg_namelen = 0;
275 +
276 lock_sock(sk);
277 if (ctx->more) {
278 ctx->more = 0;
279 diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
280 index 6a6dfc0..a1c4f0a 100644
281 --- a/crypto/algif_skcipher.c
282 +++ b/crypto/algif_skcipher.c
283 @@ -432,6 +432,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
284 long copied = 0;
285
286 lock_sock(sk);
287 + msg->msg_namelen = 0;
288 for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
289 iovlen--, iov++) {
290 unsigned long seglen = iov->iov_len;
291 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
292 index dfd7876..0ff5c2e 100644
293 --- a/drivers/char/hpet.c
294 +++ b/drivers/char/hpet.c
295 @@ -373,26 +373,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
296 struct hpet_dev *devp;
297 unsigned long addr;
298
299 - if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
300 - return -EINVAL;
301 -
302 devp = file->private_data;
303 addr = devp->hd_hpets->hp_hpet_phys;
304
305 if (addr & (PAGE_SIZE - 1))
306 return -ENOSYS;
307
308 - vma->vm_flags |= VM_IO;
309 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
310 -
311 - if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
312 - PAGE_SIZE, vma->vm_page_prot)) {
313 - printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
314 - __func__);
315 - return -EAGAIN;
316 - }
317 -
318 - return 0;
319 + return vm_iomap_memory(vma, addr, PAGE_SIZE);
320 #else
321 return -ENOSYS;
322 #endif
323 diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
324 index 58434e8..37fe246 100644
325 --- a/drivers/gpu/vga/vga_switcheroo.c
326 +++ b/drivers/gpu/vga/vga_switcheroo.c
327 @@ -26,6 +26,7 @@
328 #include <linux/fb.h>
329
330 #include <linux/pci.h>
331 +#include <linux/console.h>
332 #include <linux/vga_switcheroo.h>
333
334 struct vga_switcheroo_client {
335 @@ -256,8 +257,10 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
336
337 if (new_client->fb_info) {
338 struct fb_event event;
339 + console_lock();
340 event.info = new_client->fb_info;
341 fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
342 + console_unlock();
343 }
344
345 ret = vgasr_priv.handler->switchto(new_client->id);
346 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
347 index 76afcb4..6e3d6dc 100644
348 --- a/drivers/mtd/mtdchar.c
349 +++ b/drivers/mtd/mtdchar.c
350 @@ -1159,45 +1159,17 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
351 struct mtd_file_info *mfi = file->private_data;
352 struct mtd_info *mtd = mfi->mtd;
353 struct map_info *map = mtd->priv;
354 - resource_size_t start, off;
355 - unsigned long len, vma_len;
356
357 /* This is broken because it assumes the MTD device is map-based
358 and that mtd->priv is a valid struct map_info. It should be
359 replaced with something that uses the mtd_get_unmapped_area()
360 operation properly. */
361 if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
362 - off = get_vm_offset(vma);
363 - start = map->phys;
364 - len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
365 - start &= PAGE_MASK;
366 - vma_len = get_vm_size(vma);
367 -
368 - /* Overflow in off+len? */
369 - if (vma_len + off < off)
370 - return -EINVAL;
371 - /* Does it fit in the mapping? */
372 - if (vma_len + off > len)
373 - return -EINVAL;
374 -
375 - off += start;
376 - /* Did that overflow? */
377 - if (off < start)
378 - return -EINVAL;
379 - if (set_vm_offset(vma, off) < 0)
380 - return -EINVAL;
381 - vma->vm_flags |= VM_IO | VM_RESERVED;
382 -
383 #ifdef pgprot_noncached
384 - if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
385 + if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
386 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
387 #endif
388 - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
389 - vma->vm_end - vma->vm_start,
390 - vma->vm_page_prot))
391 - return -EAGAIN;
392 -
393 - return 0;
394 + return vm_iomap_memory(vma, map->phys, map->size);
395 }
396 return -ENOSYS;
397 #else
398 diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
399 index f2683eb..c505b55 100644
400 --- a/drivers/net/can/sja1000/sja1000_of_platform.c
401 +++ b/drivers/net/can/sja1000/sja1000_of_platform.c
402 @@ -94,8 +94,8 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev)
403 struct net_device *dev;
404 struct sja1000_priv *priv;
405 struct resource res;
406 - const u32 *prop;
407 - int err, irq, res_size, prop_size;
408 + u32 prop;
409 + int err, irq, res_size;
410 void __iomem *base;
411
412 err = of_address_to_resource(np, 0, &res);
413 @@ -136,27 +136,27 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev)
414 priv->read_reg = sja1000_ofp_read_reg;
415 priv->write_reg = sja1000_ofp_write_reg;
416
417 - prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size);
418 - if (prop && (prop_size == sizeof(u32)))
419 - priv->can.clock.freq = *prop / 2;
420 + err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
421 + if (!err)
422 + priv->can.clock.freq = prop / 2;
423 else
424 priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
425
426 - prop = of_get_property(np, "nxp,tx-output-mode", &prop_size);
427 - if (prop && (prop_size == sizeof(u32)))
428 - priv->ocr |= *prop & OCR_MODE_MASK;
429 + err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
430 + if (!err)
431 + priv->ocr |= prop & OCR_MODE_MASK;
432 else
433 priv->ocr |= OCR_MODE_NORMAL; /* default */
434
435 - prop = of_get_property(np, "nxp,tx-output-config", &prop_size);
436 - if (prop && (prop_size == sizeof(u32)))
437 - priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK;
438 + err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
439 + if (!err)
440 + priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
441 else
442 priv->ocr |= OCR_TX0_PULLDOWN; /* default */
443
444 - prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size);
445 - if (prop && (prop_size == sizeof(u32)) && *prop) {
446 - u32 divider = priv->can.clock.freq * 2 / *prop;
447 + err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
448 + if (!err && prop) {
449 + u32 divider = priv->can.clock.freq * 2 / prop;
450
451 if (divider > 1)
452 priv->cdr |= divider / 2 - 1;
453 @@ -166,8 +166,7 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev)
454 priv->cdr |= CDR_CLK_OFF; /* default */
455 }
456
457 - prop = of_get_property(np, "nxp,no-comparator-bypass", NULL);
458 - if (!prop)
459 + if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
460 priv->cdr |= CDR_CBP; /* default */
461
462 priv->irq_flags = IRQF_SHARED;
463 diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
464 index 06b3f0d..c16bea4 100644
465 --- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
466 +++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
467 @@ -648,7 +648,7 @@ static const u32 ar9580_1p0_mac_core[][2] = {
468 {0x00008258, 0x00000000},
469 {0x0000825c, 0x40000000},
470 {0x00008260, 0x00080922},
471 - {0x00008264, 0x9bc00010},
472 + {0x00008264, 0x9d400010},
473 {0x00008268, 0xffffffff},
474 {0x0000826c, 0x0000ffff},
475 {0x00008270, 0x00000000},
476 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
477 index de5ee15..41c5237 100644
478 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
479 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
480 @@ -771,7 +771,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
481 * required version.
482 */
483 if (priv->fw_version_major != MAJOR_VERSION_REQ ||
484 - priv->fw_version_minor != MINOR_VERSION_REQ) {
485 + priv->fw_version_minor < MINOR_VERSION_REQ) {
486 dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n",
487 MAJOR_VERSION_REQ, MINOR_VERSION_REQ);
488 return -EINVAL;
489 diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
490 index 6be2f73..4ce3e1f 100644
491 --- a/drivers/net/wireless/b43/phy_n.c
492 +++ b/drivers/net/wireless/b43/phy_n.c
493 @@ -4582,7 +4582,8 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
494 #endif
495 #ifdef CONFIG_B43_SSB
496 case B43_BUS_SSB:
497 - /* FIXME */
498 + ssb_pmu_spuravoid_pllupdate(&dev->dev->sdev->bus->chipco,
499 + avoid);
500 break;
501 #endif
502 }
503 diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
504 index b58fef7..1fb9b22 100644
505 --- a/drivers/ssb/driver_chipcommon_pmu.c
506 +++ b/drivers/ssb/driver_chipcommon_pmu.c
507 @@ -645,3 +645,32 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc)
508 return 0;
509 }
510 }
511 +
512 +void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid)
513 +{
514 + u32 pmu_ctl = 0;
515 +
516 + switch (cc->dev->bus->chip_id) {
517 + case 0x4322:
518 + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11100070);
519 + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x1014140a);
520 + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888854);
521 + if (spuravoid == 1)
522 + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05201828);
523 + else
524 + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05001828);
525 + pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD;
526 + break;
527 + case 43222:
528 + /* TODO: BCM43222 requires updating PLLs too */
529 + return;
530 + default:
531 + ssb_printk(KERN_ERR PFX
532 + "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
533 + cc->dev->bus->chip_id);
534 + return;
535 + }
536 +
537 + chipco_set32(cc, SSB_CHIPCO_PMU_CTL, pmu_ctl);
538 +}
539 +EXPORT_SYMBOL_GPL(ssb_pmu_spuravoid_pllupdate);
540 diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
541 index 5bf163e..18ded2d 100644
542 --- a/drivers/video/console/fbcon.c
543 +++ b/drivers/video/console/fbcon.c
544 @@ -842,6 +842,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
545 *
546 * Maps a virtual console @unit to a frame buffer device
547 * @newidx.
548 + *
549 + * This should be called with the console lock held.
550 */
551 static int set_con2fb_map(int unit, int newidx, int user)
552 {
553 @@ -859,7 +861,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
554
555 if (!search_for_mapped_con() || !con_is_bound(&fb_con)) {
556 info_idx = newidx;
557 - return fbcon_takeover(0);
558 + return do_fbcon_takeover(0);
559 }
560
561 if (oldidx != -1)
562 @@ -867,7 +869,6 @@ static int set_con2fb_map(int unit, int newidx, int user)
563
564 found = search_fb_in_map(newidx);
565
566 - console_lock();
567 con2fb_map[unit] = newidx;
568 if (!err && !found)
569 err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
570 @@ -894,7 +895,6 @@ static int set_con2fb_map(int unit, int newidx, int user)
571 if (!search_fb_in_map(info_idx))
572 info_idx = newidx;
573
574 - console_unlock();
575 return err;
576 }
577
578 @@ -3025,6 +3025,7 @@ static inline int fbcon_unbind(void)
579 }
580 #endif /* CONFIG_VT_HW_CONSOLE_BINDING */
581
582 +/* called with console_lock held */
583 static int fbcon_fb_unbind(int idx)
584 {
585 int i, new_idx = -1, ret = 0;
586 @@ -3051,6 +3052,7 @@ static int fbcon_fb_unbind(int idx)
587 return ret;
588 }
589
590 +/* called with console_lock held */
591 static int fbcon_fb_unregistered(struct fb_info *info)
592 {
593 int i, idx;
594 @@ -3088,6 +3090,7 @@ static int fbcon_fb_unregistered(struct fb_info *info)
595 return 0;
596 }
597
598 +/* called with console_lock held */
599 static void fbcon_remap_all(int idx)
600 {
601 int i;
602 @@ -3132,6 +3135,7 @@ static inline void fbcon_select_primary(struct fb_info *info)
603 }
604 #endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */
605
606 +/* called with console_lock held */
607 static int fbcon_fb_registered(struct fb_info *info)
608 {
609 int ret = 0, i, idx;
610 @@ -3284,6 +3288,7 @@ static int fbcon_event_notify(struct notifier_block *self,
611 ret = fbcon_fb_unregistered(info);
612 break;
613 case FB_EVENT_SET_CONSOLE_MAP:
614 + /* called with console lock held */
615 con2fb = event->data;
616 ret = set_con2fb_map(con2fb->console - 1,
617 con2fb->framebuffer, 1);
618 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
619 index 90f1315..5641a22 100644
620 --- a/drivers/video/fbmem.c
621 +++ b/drivers/video/fbmem.c
622 @@ -1168,8 +1168,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
623 event.data = &con2fb;
624 if (!lock_fb_info(info))
625 return -ENODEV;
626 + console_lock();
627 event.info = info;
628 ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
629 + console_unlock();
630 unlock_fb_info(info);
631 break;
632 case FBIOBLANK:
633 @@ -1362,15 +1364,12 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
634 {
635 struct fb_info *info = file_fb_info(file);
636 struct fb_ops *fb;
637 - unsigned long off;
638 + unsigned long mmio_pgoff;
639 unsigned long start;
640 u32 len;
641
642 if (!info)
643 return -ENODEV;
644 - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
645 - return -EINVAL;
646 - off = vma->vm_pgoff << PAGE_SHIFT;
647 fb = info->fbops;
648 if (!fb)
649 return -ENODEV;
650 @@ -1382,33 +1381,24 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
651 return res;
652 }
653
654 - /* frame buffer memory */
655 + /*
656 + * Ugh. This can be either the frame buffer mapping, or
657 + * if pgoff points past it, the mmio mapping.
658 + */
659 start = info->fix.smem_start;
660 - len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
661 - if (off >= len) {
662 - /* memory mapped io */
663 - off -= len;
664 - if (info->var.accel_flags) {
665 - mutex_unlock(&info->mm_lock);
666 - return -EINVAL;
667 - }
668 + len = info->fix.smem_len;
669 + mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
670 + if (vma->vm_pgoff >= mmio_pgoff) {
671 + vma->vm_pgoff -= mmio_pgoff;
672 start = info->fix.mmio_start;
673 - len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
674 + len = info->fix.mmio_len;
675 }
676 mutex_unlock(&info->mm_lock);
677 - start &= PAGE_MASK;
678 - if ((vma->vm_end - vma->vm_start + off) > len)
679 - return -EINVAL;
680 - off += start;
681 - vma->vm_pgoff = off >> PAGE_SHIFT;
682 - /* This is an IO map - tell maydump to skip this VMA */
683 - vma->vm_flags |= VM_IO | VM_RESERVED;
684 +
685 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
686 - fb_pgprotect(file, vma, off);
687 - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
688 - vma->vm_end - vma->vm_start, vma->vm_page_prot))
689 - return -EAGAIN;
690 - return 0;
691 + fb_pgprotect(file, vma, start);
692 +
693 + return vm_iomap_memory(vma, start, len);
694 }
695
696 static int
697 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
698 index dce89da..3ef7f38 100644
699 --- a/fs/btrfs/tree-log.c
700 +++ b/fs/btrfs/tree-log.c
701 @@ -315,6 +315,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
702 unsigned long src_ptr;
703 unsigned long dst_ptr;
704 int overwrite_root = 0;
705 + bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
706
707 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
708 overwrite_root = 1;
709 @@ -324,6 +325,9 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
710
711 /* look for the key in the destination tree */
712 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
713 + if (ret < 0)
714 + return ret;
715 +
716 if (ret == 0) {
717 char *src_copy;
718 char *dst_copy;
719 @@ -365,6 +369,30 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
720 return 0;
721 }
722
723 + /*
724 + * We need to load the old nbytes into the inode so when we
725 + * replay the extents we've logged we get the right nbytes.
726 + */
727 + if (inode_item) {
728 + struct btrfs_inode_item *item;
729 + u64 nbytes;
730 +
731 + item = btrfs_item_ptr(path->nodes[0], path->slots[0],
732 + struct btrfs_inode_item);
733 + nbytes = btrfs_inode_nbytes(path->nodes[0], item);
734 + item = btrfs_item_ptr(eb, slot,
735 + struct btrfs_inode_item);
736 + btrfs_set_inode_nbytes(eb, item, nbytes);
737 + }
738 + } else if (inode_item) {
739 + struct btrfs_inode_item *item;
740 +
741 + /*
742 + * New inode, set nbytes to 0 so that the nbytes comes out
743 + * properly when we replay the extents.
744 + */
745 + item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
746 + btrfs_set_inode_nbytes(eb, item, 0);
747 }
748 insert:
749 btrfs_release_path(path);
750 @@ -486,7 +514,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
751 u64 extent_end;
752 u64 alloc_hint;
753 u64 start = key->offset;
754 - u64 saved_nbytes;
755 + u64 nbytes = 0;
756 struct btrfs_file_extent_item *item;
757 struct inode *inode = NULL;
758 unsigned long size;
759 @@ -496,10 +524,19 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
760 found_type = btrfs_file_extent_type(eb, item);
761
762 if (found_type == BTRFS_FILE_EXTENT_REG ||
763 - found_type == BTRFS_FILE_EXTENT_PREALLOC)
764 - extent_end = start + btrfs_file_extent_num_bytes(eb, item);
765 - else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
766 + found_type == BTRFS_FILE_EXTENT_PREALLOC) {
767 + nbytes = btrfs_file_extent_num_bytes(eb, item);
768 + extent_end = start + nbytes;
769 +
770 + /*
771 + * We don't add to the inodes nbytes if we are prealloc or a
772 + * hole.
773 + */
774 + if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
775 + nbytes = 0;
776 + } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
777 size = btrfs_file_extent_inline_len(eb, item);
778 + nbytes = btrfs_file_extent_ram_bytes(eb, item);
779 extent_end = (start + size + mask) & ~mask;
780 } else {
781 ret = 0;
782 @@ -548,7 +585,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
783 }
784 btrfs_release_path(path);
785
786 - saved_nbytes = inode_get_bytes(inode);
787 /* drop any overlapping extents */
788 ret = btrfs_drop_extents(trans, inode, start, extent_end,
789 &alloc_hint, 1);
790 @@ -636,7 +672,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
791 BUG_ON(ret);
792 }
793
794 - inode_set_bytes(inode, saved_nbytes);
795 + inode_add_bytes(inode, nbytes);
796 btrfs_update_inode(trans, root, inode);
797 out:
798 if (inode)
799 diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
800 index 5849e3e..32b12e5 100644
801 --- a/fs/hfsplus/extents.c
802 +++ b/fs/hfsplus/extents.c
803 @@ -517,7 +517,7 @@ void hfsplus_file_truncate(struct inode *inode)
804 struct address_space *mapping = inode->i_mapping;
805 struct page *page;
806 void *fsdata;
807 - u32 size = inode->i_size;
808 + loff_t size = inode->i_size;
809
810 res = pagecache_write_begin(NULL, mapping, size, 0,
811 AOP_FLAG_UNINTERRUPTIBLE,
812 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
813 index 72cbf08..c6fb815 100644
814 --- a/include/linux/kvm_host.h
815 +++ b/include/linux/kvm_host.h
816 @@ -427,7 +427,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
817 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
818 void *data, unsigned long len);
819 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
820 - gpa_t gpa);
821 + gpa_t gpa, unsigned long len);
822 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
823 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
824 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
825 diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
826 index fa7cc72..b0bcce0 100644
827 --- a/include/linux/kvm_types.h
828 +++ b/include/linux/kvm_types.h
829 @@ -71,6 +71,7 @@ struct gfn_to_hva_cache {
830 u64 generation;
831 gpa_t gpa;
832 unsigned long hva;
833 + unsigned long len;
834 struct kvm_memory_slot *memslot;
835 };
836
837 diff --git a/include/linux/mm.h b/include/linux/mm.h
838 index 441a564..ece5ff4 100644
839 --- a/include/linux/mm.h
840 +++ b/include/linux/mm.h
841 @@ -1507,6 +1507,8 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
842 unsigned long pfn);
843 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
844 unsigned long pfn);
845 +int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
846 +
847
848 struct page *follow_page(struct vm_area_struct *, unsigned long address,
849 unsigned int foll_flags);
850 diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h
851 index 1a6b004..29ce7e4 100644
852 --- a/include/linux/ssb/ssb_driver_chipcommon.h
853 +++ b/include/linux/ssb/ssb_driver_chipcommon.h
854 @@ -219,6 +219,7 @@
855 #define SSB_CHIPCO_PMU_CTL 0x0600 /* PMU control */
856 #define SSB_CHIPCO_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */
857 #define SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT 16
858 +#define SSB_CHIPCO_PMU_CTL_PLL_UPD 0x00000400
859 #define SSB_CHIPCO_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */
860 #define SSB_CHIPCO_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */
861 #define SSB_CHIPCO_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */
862 @@ -661,5 +662,6 @@ enum ssb_pmu_ldo_volt_id {
863 void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc,
864 enum ssb_pmu_ldo_volt_id id, u32 voltage);
865 void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on);
866 +void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid);
867
868 #endif /* LINUX_SSB_CHIPCO_H_ */
869 diff --git a/kernel/events/core.c b/kernel/events/core.c
870 index 228fdb0..839a24f 100644
871 --- a/kernel/events/core.c
872 +++ b/kernel/events/core.c
873 @@ -5126,7 +5126,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
874
875 static int perf_swevent_init(struct perf_event *event)
876 {
877 - int event_id = event->attr.config;
878 + u64 event_id = event->attr.config;
879
880 if (event->attr.type != PERF_TYPE_SOFTWARE)
881 return -ENOENT;
882 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
883 index cdd5607..e4cee8d 100644
884 --- a/kernel/hrtimer.c
885 +++ b/kernel/hrtimer.c
886 @@ -61,6 +61,7 @@
887 DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
888 {
889
890 + .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
891 .clock_base =
892 {
893 {
894 @@ -1640,8 +1641,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
895 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
896 int i;
897
898 - raw_spin_lock_init(&cpu_base->lock);
899 -
900 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
901 cpu_base->clock_base[i].cpu_base = cpu_base;
902 timerqueue_init_head(&cpu_base->clock_base[i].active);
903 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
904 index e1718bc..4b6c546 100644
905 --- a/kernel/sched/core.c
906 +++ b/kernel/sched/core.c
907 @@ -1653,8 +1653,10 @@ static void try_to_wake_up_local(struct task_struct *p)
908 {
909 struct rq *rq = task_rq(p);
910
911 - BUG_ON(rq != this_rq());
912 - BUG_ON(p == current);
913 + if (WARN_ON_ONCE(rq != this_rq()) ||
914 + WARN_ON_ONCE(p == current))
915 + return;
916 +
917 lockdep_assert_held(&rq->lock);
918
919 if (!raw_spin_trylock(&p->pi_lock)) {
920 diff --git a/kernel/signal.c b/kernel/signal.c
921 index 32b10d4..959df4f 100644
922 --- a/kernel/signal.c
923 +++ b/kernel/signal.c
924 @@ -2867,7 +2867,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
925
926 static int do_tkill(pid_t tgid, pid_t pid, int sig)
927 {
928 - struct siginfo info;
929 + struct siginfo info = {};
930
931 info.si_signo = sig;
932 info.si_errno = 0;
933 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
934 index e427969..69b21bb 100644
935 --- a/mm/hugetlb.c
936 +++ b/mm/hugetlb.c
937 @@ -2906,7 +2906,17 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
938 break;
939 }
940
941 - if (absent ||
942 + /*
943 + * We need call hugetlb_fault for both hugepages under migration
944 + * (in which case hugetlb_fault waits for the migration,) and
945 + * hwpoisoned hugepages (in which case we need to prevent the
946 + * caller from accessing to them.) In order to do this, we use
947 + * here is_swap_pte instead of is_hugetlb_entry_migration and
948 + * is_hugetlb_entry_hwpoisoned. This is because it simply covers
949 + * both cases, and because we can't follow correct pages
950 + * directly from any kind of swap entries.
951 + */
952 + if (absent || is_swap_pte(huge_ptep_get(pte)) ||
953 ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
954 int ret;
955
956 diff --git a/mm/memory.c b/mm/memory.c
957 index 2f42aab..17d8661 100644
958 --- a/mm/memory.c
959 +++ b/mm/memory.c
960 @@ -2329,6 +2329,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
961 }
962 EXPORT_SYMBOL(remap_pfn_range);
963
964 +/**
965 + * vm_iomap_memory - remap memory to userspace
966 + * @vma: user vma to map to
967 + * @start: start of area
968 + * @len: size of area
969 + *
970 + * This is a simplified io_remap_pfn_range() for common driver use. The
971 + * driver just needs to give us the physical memory range to be mapped,
972 + * we'll figure out the rest from the vma information.
973 + *
974 + * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
975 + * whatever write-combining details or similar.
976 + */
977 +int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
978 +{
979 + unsigned long vm_len, pfn, pages;
980 +
981 + /* Check that the physical memory area passed in looks valid */
982 + if (start + len < start)
983 + return -EINVAL;
984 + /*
985 + * You *really* shouldn't map things that aren't page-aligned,
986 + * but we've historically allowed it because IO memory might
987 + * just have smaller alignment.
988 + */
989 + len += start & ~PAGE_MASK;
990 + pfn = start >> PAGE_SHIFT;
991 + pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
992 + if (pfn + pages < pfn)
993 + return -EINVAL;
994 +
995 + /* We start the mapping 'vm_pgoff' pages into the area */
996 + if (vma->vm_pgoff > pages)
997 + return -EINVAL;
998 + pfn += vma->vm_pgoff;
999 + pages -= vma->vm_pgoff;
1000 +
1001 + /* Can we fit all of the mapping? */
1002 + vm_len = vma->vm_end - vma->vm_start;
1003 + if (vm_len >> PAGE_SHIFT > pages)
1004 + return -EINVAL;
1005 +
1006 + /* Ok, let it rip */
1007 + return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1008 +}
1009 +EXPORT_SYMBOL(vm_iomap_memory);
1010 +
1011 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
1012 unsigned long addr, unsigned long end,
1013 pte_fn_t fn, void *data)
1014 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
1015 index d535b34..d776291 100644
1016 --- a/sound/core/pcm_native.c
1017 +++ b/sound/core/pcm_native.c
1018 @@ -3209,18 +3209,10 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
1019 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
1020 struct vm_area_struct *area)
1021 {
1022 - long size;
1023 - unsigned long offset;
1024 + struct snd_pcm_runtime *runtime = substream->runtime;;
1025
1026 area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
1027 - area->vm_flags |= VM_IO;
1028 - size = area->vm_end - area->vm_start;
1029 - offset = area->vm_pgoff << PAGE_SHIFT;
1030 - if (io_remap_pfn_range(area, area->vm_start,
1031 - (substream->runtime->dma_addr + offset) >> PAGE_SHIFT,
1032 - size, area->vm_page_prot))
1033 - return -EAGAIN;
1034 - return 0;
1035 + return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
1036 }
1037
1038 EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
1039 diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
1040 index dcaf272c26..9f477f6 100644
1041 --- a/virt/kvm/ioapic.c
1042 +++ b/virt/kvm/ioapic.c
1043 @@ -73,9 +73,12 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
1044 u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
1045 u64 redir_content;
1046
1047 - ASSERT(redir_index < IOAPIC_NUM_PINS);
1048 + if (redir_index < IOAPIC_NUM_PINS)
1049 + redir_content =
1050 + ioapic->redirtbl[redir_index].bits;
1051 + else
1052 + redir_content = ~0ULL;
1053
1054 - redir_content = ioapic->redirtbl[redir_index].bits;
1055 result = (ioapic->ioregsel & 0x1) ?
1056 (redir_content >> 32) & 0xffffffff :
1057 redir_content & 0xffffffff;
1058 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
1059 index 71b9036..bdfbc1b 100644
1060 --- a/virt/kvm/kvm_main.c
1061 +++ b/virt/kvm/kvm_main.c
1062 @@ -1382,21 +1382,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1063 }
1064
1065 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1066 - gpa_t gpa)
1067 + gpa_t gpa, unsigned long len)
1068 {
1069 struct kvm_memslots *slots = kvm_memslots(kvm);
1070 int offset = offset_in_page(gpa);
1071 - gfn_t gfn = gpa >> PAGE_SHIFT;
1072 + gfn_t start_gfn = gpa >> PAGE_SHIFT;
1073 + gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
1074 + gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
1075 + gfn_t nr_pages_avail;
1076
1077 ghc->gpa = gpa;
1078 ghc->generation = slots->generation;
1079 - ghc->memslot = gfn_to_memslot(kvm, gfn);
1080 - ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
1081 - if (!kvm_is_error_hva(ghc->hva))
1082 + ghc->len = len;
1083 + ghc->memslot = gfn_to_memslot(kvm, start_gfn);
1084 + ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
1085 + if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
1086 ghc->hva += offset;
1087 - else
1088 - return -EFAULT;
1089 -
1090 + } else {
1091 + /*
1092 + * If the requested region crosses two memslots, we still
1093 + * verify that the entire region is valid here.
1094 + */
1095 + while (start_gfn <= end_gfn) {
1096 + ghc->memslot = gfn_to_memslot(kvm, start_gfn);
1097 + ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
1098 + &nr_pages_avail);
1099 + if (kvm_is_error_hva(ghc->hva))
1100 + return -EFAULT;
1101 + start_gfn += nr_pages_avail;
1102 + }
1103 + /* Use the slow path for cross page reads and writes. */
1104 + ghc->memslot = NULL;
1105 + }
1106 return 0;
1107 }
1108 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
1109 @@ -1407,8 +1424,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1110 struct kvm_memslots *slots = kvm_memslots(kvm);
1111 int r;
1112
1113 + BUG_ON(len > ghc->len);
1114 +
1115 if (slots->generation != ghc->generation)
1116 - kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
1117 + kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
1118 +
1119 + if (unlikely(!ghc->memslot))
1120 + return kvm_write_guest(kvm, ghc->gpa, data, len);
1121
1122 if (kvm_is_error_hva(ghc->hva))
1123 return -EFAULT;
1124 @@ -1428,8 +1450,13 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1125 struct kvm_memslots *slots = kvm_memslots(kvm);
1126 int r;
1127
1128 + BUG_ON(len > ghc->len);
1129 +
1130 if (slots->generation != ghc->generation)
1131 - kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
1132 + kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
1133 +
1134 + if (unlikely(!ghc->memslot))
1135 + return kvm_read_guest(kvm, ghc->gpa, data, len);
1136
1137 if (kvm_is_error_hva(ghc->hva))
1138 return -EFAULT;