Magellan Linux

Contents of /trunk/kernel26-alx/patches-2.6.23-r1/0102-2.6.23.3-all-fixes.patch

Parent Directory | Revision Log


Revision 658 - (show annotations) (download)
Mon Jun 23 21:39:39 2008 UTC (15 years, 10 months ago) by niro
File size: 30816 byte(s)
2.6.23-alx-r1: new default as we fix the via epia clocksource=tsc quirks
-linux-2.6.23.17
-fbcondecor-0.9.4
-squashfs-3.3
-unionfs-2.3.3
-ipw3945-1.2.2
-mptbase-vmware fix

1 diff --git a/arch/i386/boot/boot.h b/arch/i386/boot/boot.h
2 index 20bab94..3eeb9e5 100644
3 --- a/arch/i386/boot/boot.h
4 +++ b/arch/i386/boot/boot.h
5 @@ -17,6 +17,8 @@
6 #ifndef BOOT_BOOT_H
7 #define BOOT_BOOT_H
8
9 +#define STACK_SIZE 512 /* Minimum number of bytes for stack */
10 +
11 #ifndef __ASSEMBLY__
12
13 #include <stdarg.h>
14 @@ -198,8 +200,6 @@ static inline int isdigit(int ch)
15 }
16
17 /* Heap -- available for dynamic lists. */
18 -#define STACK_SIZE 512 /* Minimum number of bytes for stack */
19 -
20 extern char _end[];
21 extern char *HEAP;
22 extern char *heap_end;
23 @@ -216,9 +216,9 @@ static inline char *__get_heap(size_t s, size_t a, size_t n)
24 #define GET_HEAP(type, n) \
25 ((type *)__get_heap(sizeof(type),__alignof__(type),(n)))
26
27 -static inline int heap_free(void)
28 +static inline bool heap_free(size_t n)
29 {
30 - return heap_end-HEAP;
31 + return (int)(heap_end-HEAP) >= (int)n;
32 }
33
34 /* copy.S */
35 diff --git a/arch/i386/boot/header.S b/arch/i386/boot/header.S
36 index f3140e5..fff7059 100644
37 --- a/arch/i386/boot/header.S
38 +++ b/arch/i386/boot/header.S
39 @@ -173,7 +173,8 @@ ramdisk_size: .long 0 # its size in bytes
40 bootsect_kludge:
41 .long 0 # obsolete
42
43 -heap_end_ptr: .word _end+1024 # (Header version 0x0201 or later)
44 +heap_end_ptr: .word _end+STACK_SIZE-512
45 + # (Header version 0x0201 or later)
46 # space from here (exclusive) down to
47 # end of setup code can be used by setup
48 # for local heap purposes.
49 @@ -225,28 +226,53 @@ start_of_setup:
50 int $0x13
51 #endif
52
53 -# We will have entered with %cs = %ds+0x20, normalize %cs so
54 -# it is on par with the other segments.
55 - pushw %ds
56 - pushw $setup2
57 - lretw
58 -
59 -setup2:
60 # Force %es = %ds
61 movw %ds, %ax
62 movw %ax, %es
63 cld
64
65 -# Stack paranoia: align the stack and make sure it is good
66 -# for both 16- and 32-bit references. In particular, if we
67 -# were meant to have been using the full 16-bit segment, the
68 -# caller might have set %sp to zero, which breaks %esp-based
69 -# references.
70 - andw $~3, %sp # dword align (might as well...)
71 - jnz 1f
72 - movw $0xfffc, %sp # Make sure we're not zero
73 -1: movzwl %sp, %esp # Clear upper half of %esp
74 - sti
75 +# Apparently some ancient versions of LILO invoked the kernel
76 +# with %ss != %ds, which happened to work by accident for the
77 +# old code. If the CAN_USE_HEAP flag is set in loadflags, or
78 +# %ss != %ds, then adjust the stack pointer.
79 +
80 + # Smallest possible stack we can tolerate
81 + movw $(_end+STACK_SIZE), %cx
82 +
83 + movw heap_end_ptr, %dx
84 + addw $512, %dx
85 + jnc 1f
86 + xorw %dx, %dx # Wraparound - whole segment available
87 +1: testb $CAN_USE_HEAP, loadflags
88 + jnz 2f
89 +
90 + # No CAN_USE_HEAP
91 + movw %ss, %dx
92 + cmpw %ax, %dx # %ds == %ss?
93 + movw %sp, %dx
94 + # If so, assume %sp is reasonably set, otherwise use
95 + # the smallest possible stack.
96 + jne 4f # -> Smallest possible stack...
97 +
98 + # Make sure the stack is at least minimum size. Take a value
99 + # of zero to mean "full segment."
100 +2:
101 + andw $~3, %dx # dword align (might as well...)
102 + jnz 3f
103 + movw $0xfffc, %dx # Make sure we're not zero
104 +3: cmpw %cx, %dx
105 + jnb 5f
106 +4: movw %cx, %dx # Minimum value we can possibly use
107 +5: movw %ax, %ss
108 + movzwl %dx, %esp # Clear upper half of %esp
109 + sti # Now we should have a working stack
110 +
111 +# We will have entered with %cs = %ds+0x20, normalize %cs so
112 +# it is on par with the other segments.
113 + pushw %ds
114 + pushw $6f
115 + lretw
116 +6:
117
118 # Check signature at end of setup
119 cmpl $0x5a5aaa55, setup_sig
120 diff --git a/arch/i386/boot/video-bios.c b/arch/i386/boot/video-bios.c
121 index 68e65d9..ed0672a 100644
122 --- a/arch/i386/boot/video-bios.c
123 +++ b/arch/i386/boot/video-bios.c
124 @@ -79,7 +79,7 @@ static int bios_probe(void)
125 video_bios.modes = GET_HEAP(struct mode_info, 0);
126
127 for (mode = 0x14; mode <= 0x7f; mode++) {
128 - if (heap_free() < sizeof(struct mode_info))
129 + if (!heap_free(sizeof(struct mode_info)))
130 break;
131
132 if (mode_defined(VIDEO_FIRST_BIOS+mode))
133 diff --git a/arch/i386/boot/video-vesa.c b/arch/i386/boot/video-vesa.c
134 index 1921907..4716b9a 100644
135 --- a/arch/i386/boot/video-vesa.c
136 +++ b/arch/i386/boot/video-vesa.c
137 @@ -57,7 +57,7 @@ static int vesa_probe(void)
138 while ((mode = rdfs16(mode_ptr)) != 0xffff) {
139 mode_ptr += 2;
140
141 - if (heap_free() < sizeof(struct mode_info))
142 + if (!heap_free(sizeof(struct mode_info)))
143 break; /* Heap full, can't save mode info */
144
145 if (mode & ~0x1ff)
146 diff --git a/arch/i386/boot/video.c b/arch/i386/boot/video.c
147 index e4ba897..ad9712f 100644
148 --- a/arch/i386/boot/video.c
149 +++ b/arch/i386/boot/video.c
150 @@ -371,7 +371,7 @@ static void save_screen(void)
151 saved.curx = boot_params.screen_info.orig_x;
152 saved.cury = boot_params.screen_info.orig_y;
153
154 - if (heap_free() < saved.x*saved.y*sizeof(u16)+512)
155 + if (!heap_free(saved.x*saved.y*sizeof(u16)+512))
156 return; /* Not enough heap to save the screen */
157
158 saved.data = GET_HEAP(u16, saved.x*saved.y);
159 diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
160 index a39280b..7f6add1 100644
161 --- a/arch/i386/kernel/tsc.c
162 +++ b/arch/i386/kernel/tsc.c
163 @@ -137,7 +137,7 @@ unsigned long native_calculate_cpu_khz(void)
164 {
165 unsigned long long start, end;
166 unsigned long count;
167 - u64 delta64;
168 + u64 delta64 = (u64)ULLONG_MAX;
169 int i;
170 unsigned long flags;
171
172 @@ -149,6 +149,7 @@ unsigned long native_calculate_cpu_khz(void)
173 rdtscll(start);
174 mach_countup(&count);
175 rdtscll(end);
176 + delta64 = min(delta64, (end - start));
177 }
178 /*
179 * Error: ECTCNEVERSET
180 @@ -159,8 +160,6 @@ unsigned long native_calculate_cpu_khz(void)
181 if (count <= 1)
182 goto err;
183
184 - delta64 = end - start;
185 -
186 /* cpu freq too fast: */
187 if (delta64 > (1ULL<<32))
188 goto err;
189 diff --git a/arch/i386/xen/enlighten.c b/arch/i386/xen/enlighten.c
190 index f01bfcd..1ba2408 100644
191 --- a/arch/i386/xen/enlighten.c
192 +++ b/arch/i386/xen/enlighten.c
193 @@ -56,7 +56,23 @@ DEFINE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);
194
195 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
196 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
197 -DEFINE_PER_CPU(unsigned long, xen_cr3);
198 +
199 +/*
200 + * Note about cr3 (pagetable base) values:
201 + *
202 + * xen_cr3 contains the current logical cr3 value; it contains the
203 + * last set cr3. This may not be the current effective cr3, because
204 + * its update may be being lazily deferred. However, a vcpu looking
205 + * at its own cr3 can use this value knowing that it everything will
206 + * be self-consistent.
207 + *
208 + * xen_current_cr3 contains the actual vcpu cr3; it is set once the
209 + * hypercall to set the vcpu cr3 is complete (so it may be a little
210 + * out of date, but it will never be set early). If one vcpu is
211 + * looking at another vcpu's cr3 value, it should use this variable.
212 + */
213 +DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
214 +DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
215
216 struct start_info *xen_start_info;
217 EXPORT_SYMBOL_GPL(xen_start_info);
218 @@ -100,7 +116,7 @@ static void __init xen_vcpu_setup(int cpu)
219 info.mfn = virt_to_mfn(vcpup);
220 info.offset = offset_in_page(vcpup);
221
222 - printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %x, offset %d\n",
223 + printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
224 cpu, vcpup, info.mfn, info.offset);
225
226 /* Check to see if the hypervisor will put the vcpu_info
227 @@ -632,32 +648,36 @@ static unsigned long xen_read_cr3(void)
228 return x86_read_percpu(xen_cr3);
229 }
230
231 +static void set_current_cr3(void *v)
232 +{
233 + x86_write_percpu(xen_current_cr3, (unsigned long)v);
234 +}
235 +
236 static void xen_write_cr3(unsigned long cr3)
237 {
238 + struct mmuext_op *op;
239 + struct multicall_space mcs;
240 + unsigned long mfn = pfn_to_mfn(PFN_DOWN(cr3));
241 +
242 BUG_ON(preemptible());
243
244 - if (cr3 == x86_read_percpu(xen_cr3)) {
245 - /* just a simple tlb flush */
246 - xen_flush_tlb();
247 - return;
248 - }
249 + mcs = xen_mc_entry(sizeof(*op)); /* disables interrupts */
250
251 + /* Update while interrupts are disabled, so its atomic with
252 + respect to ipis */
253 x86_write_percpu(xen_cr3, cr3);
254
255 + op = mcs.args;
256 + op->cmd = MMUEXT_NEW_BASEPTR;
257 + op->arg1.mfn = mfn;
258
259 - {
260 - struct mmuext_op *op;
261 - struct multicall_space mcs = xen_mc_entry(sizeof(*op));
262 - unsigned long mfn = pfn_to_mfn(PFN_DOWN(cr3));
263 -
264 - op = mcs.args;
265 - op->cmd = MMUEXT_NEW_BASEPTR;
266 - op->arg1.mfn = mfn;
267 + MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
268
269 - MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
270 + /* Update xen_update_cr3 once the batch has actually
271 + been submitted. */
272 + xen_mc_callback(set_current_cr3, (void *)cr3);
273
274 - xen_mc_issue(PARAVIRT_LAZY_CPU);
275 - }
276 + xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
277 }
278
279 /* Early in boot, while setting up the initial pagetable, assume
280 @@ -1113,6 +1133,7 @@ asmlinkage void __init xen_start_kernel(void)
281 /* keep using Xen gdt for now; no urgent need to change it */
282
283 x86_write_percpu(xen_cr3, __pa(pgd));
284 + x86_write_percpu(xen_current_cr3, __pa(pgd));
285
286 #ifdef CONFIG_SMP
287 /* Don't do the full vcpu_info placement stuff until we have a
288 diff --git a/arch/i386/xen/mmu.c b/arch/i386/xen/mmu.c
289 index 874db0c..c476dfa 100644
290 --- a/arch/i386/xen/mmu.c
291 +++ b/arch/i386/xen/mmu.c
292 @@ -515,20 +515,43 @@ static void drop_other_mm_ref(void *info)
293
294 if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
295 leave_mm(smp_processor_id());
296 +
297 + /* If this cpu still has a stale cr3 reference, then make sure
298 + it has been flushed. */
299 + if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
300 + load_cr3(swapper_pg_dir);
301 + arch_flush_lazy_cpu_mode();
302 + }
303 }
304
305 static void drop_mm_ref(struct mm_struct *mm)
306 {
307 + cpumask_t mask;
308 + unsigned cpu;
309 +
310 if (current->active_mm == mm) {
311 if (current->mm == mm)
312 load_cr3(swapper_pg_dir);
313 else
314 leave_mm(smp_processor_id());
315 + arch_flush_lazy_cpu_mode();
316 + }
317 +
318 + /* Get the "official" set of cpus referring to our pagetable. */
319 + mask = mm->cpu_vm_mask;
320 +
321 + /* It's possible that a vcpu may have a stale reference to our
322 + cr3, because its in lazy mode, and it hasn't yet flushed
323 + its set of pending hypercalls yet. In this case, we can
324 + look at its actual current cr3 value, and force it to flush
325 + if needed. */
326 + for_each_online_cpu(cpu) {
327 + if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
328 + cpu_set(cpu, mask);
329 }
330
331 - if (!cpus_empty(mm->cpu_vm_mask))
332 - xen_smp_call_function_mask(mm->cpu_vm_mask, drop_other_mm_ref,
333 - mm, 1);
334 + if (!cpus_empty(mask))
335 + xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
336 }
337 #else
338 static void drop_mm_ref(struct mm_struct *mm)
339 diff --git a/arch/i386/xen/multicalls.c b/arch/i386/xen/multicalls.c
340 index c837e8e..ce9c4b4 100644
341 --- a/arch/i386/xen/multicalls.c
342 +++ b/arch/i386/xen/multicalls.c
343 @@ -32,7 +32,11 @@
344 struct mc_buffer {
345 struct multicall_entry entries[MC_BATCH];
346 u64 args[MC_ARGS];
347 - unsigned mcidx, argidx;
348 + struct callback {
349 + void (*fn)(void *);
350 + void *data;
351 + } callbacks[MC_BATCH];
352 + unsigned mcidx, argidx, cbidx;
353 };
354
355 static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
356 @@ -43,6 +47,7 @@ void xen_mc_flush(void)
357 struct mc_buffer *b = &__get_cpu_var(mc_buffer);
358 int ret = 0;
359 unsigned long flags;
360 + int i;
361
362 BUG_ON(preemptible());
363
364 @@ -51,8 +56,6 @@ void xen_mc_flush(void)
365 local_irq_save(flags);
366
367 if (b->mcidx) {
368 - int i;
369 -
370 if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
371 BUG();
372 for (i = 0; i < b->mcidx; i++)
373 @@ -65,6 +68,13 @@ void xen_mc_flush(void)
374
375 local_irq_restore(flags);
376
377 + for(i = 0; i < b->cbidx; i++) {
378 + struct callback *cb = &b->callbacks[i];
379 +
380 + (*cb->fn)(cb->data);
381 + }
382 + b->cbidx = 0;
383 +
384 BUG_ON(ret);
385 }
386
387 @@ -88,3 +98,16 @@ struct multicall_space __xen_mc_entry(size_t args)
388
389 return ret;
390 }
391 +
392 +void xen_mc_callback(void (*fn)(void *), void *data)
393 +{
394 + struct mc_buffer *b = &__get_cpu_var(mc_buffer);
395 + struct callback *cb;
396 +
397 + if (b->cbidx == MC_BATCH)
398 + xen_mc_flush();
399 +
400 + cb = &b->callbacks[b->cbidx++];
401 + cb->fn = fn;
402 + cb->data = data;
403 +}
404 diff --git a/arch/i386/xen/multicalls.h b/arch/i386/xen/multicalls.h
405 index e6f7530..e3ed9c8 100644
406 --- a/arch/i386/xen/multicalls.h
407 +++ b/arch/i386/xen/multicalls.h
408 @@ -42,4 +42,7 @@ static inline void xen_mc_issue(unsigned mode)
409 local_irq_restore(x86_read_percpu(xen_mc_irq_flags));
410 }
411
412 +/* Set up a callback to be called when the current batch is flushed */
413 +void xen_mc_callback(void (*fn)(void *), void *data);
414 +
415 #endif /* _XEN_MULTICALLS_H */
416 diff --git a/arch/i386/xen/xen-ops.h b/arch/i386/xen/xen-ops.h
417 index b9aaea4..c69708b 100644
418 --- a/arch/i386/xen/xen-ops.h
419 +++ b/arch/i386/xen/xen-ops.h
420 @@ -11,6 +11,7 @@ void xen_copy_trap_info(struct trap_info *traps);
421
422 DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
423 DECLARE_PER_CPU(unsigned long, xen_cr3);
424 +DECLARE_PER_CPU(unsigned long, xen_current_cr3);
425
426 extern struct start_info *xen_start_info;
427 extern struct shared_info *HYPERVISOR_shared_info;
428 diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
429 index bad5719..9da2a42 100644
430 --- a/arch/mips/mm/c-r4k.c
431 +++ b/arch/mips/mm/c-r4k.c
432 @@ -360,11 +360,26 @@ static void r4k___flush_cache_all(void)
433 r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
434 }
435
436 +static inline int has_valid_asid(const struct mm_struct *mm)
437 +{
438 +#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
439 + int i;
440 +
441 + for_each_online_cpu(i)
442 + if (cpu_context(i, mm))
443 + return 1;
444 +
445 + return 0;
446 +#else
447 + return cpu_context(smp_processor_id(), mm);
448 +#endif
449 +}
450 +
451 static inline void local_r4k_flush_cache_range(void * args)
452 {
453 struct vm_area_struct *vma = args;
454
455 - if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
456 + if (!(has_valid_asid(vma->vm_mm)))
457 return;
458
459 r4k_blast_dcache();
460 @@ -383,7 +398,7 @@ static inline void local_r4k_flush_cache_mm(void * args)
461 {
462 struct mm_struct *mm = args;
463
464 - if (!cpu_context(smp_processor_id(), mm))
465 + if (!has_valid_asid(mm))
466 return;
467
468 /*
469 @@ -434,7 +449,7 @@ static inline void local_r4k_flush_cache_page(void *args)
470 * If ownes no valid ASID yet, cannot possibly have gotten
471 * this page into the cache.
472 */
473 - if (cpu_context(smp_processor_id(), mm) == 0)
474 + if (!has_valid_asid(mm))
475 return;
476
477 addr &= PAGE_MASK;
478 diff --git a/arch/powerpc/math-emu/math.c b/arch/powerpc/math-emu/math.c
479 index 69058b2..381306b 100644
480 --- a/arch/powerpc/math-emu/math.c
481 +++ b/arch/powerpc/math-emu/math.c
482 @@ -407,11 +407,16 @@ do_mathemu(struct pt_regs *regs)
483
484 case XE:
485 idx = (insn >> 16) & 0x1f;
486 - if (!idx)
487 - goto illegal;
488 -
489 op0 = (void *)&current->thread.fpr[(insn >> 21) & 0x1f];
490 - op1 = (void *)(regs->gpr[idx] + regs->gpr[(insn >> 11) & 0x1f]);
491 + if (!idx) {
492 + if (((insn >> 1) & 0x3ff) == STFIWX)
493 + op1 = (void *)(regs->gpr[(insn >> 11) & 0x1f]);
494 + else
495 + goto illegal;
496 + } else {
497 + op1 = (void *)(regs->gpr[idx] + regs->gpr[(insn >> 11) & 0x1f]);
498 + }
499 +
500 break;
501
502 case XEU:
503 diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
504 index 4c9ab5b..c767065 100644
505 --- a/arch/powerpc/platforms/cell/axon_msi.c
506 +++ b/arch/powerpc/platforms/cell/axon_msi.c
507 @@ -126,7 +126,7 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev)
508 const phandle *ph;
509 struct axon_msic *msic = NULL;
510
511 - dn = pci_device_to_OF_node(dev);
512 + dn = of_node_get(pci_device_to_OF_node(dev));
513 if (!dn) {
514 dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
515 return NULL;
516 @@ -183,7 +183,7 @@ static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
517 int len;
518 const u32 *prop;
519
520 - dn = pci_device_to_OF_node(dev);
521 + dn = of_node_get(pci_device_to_OF_node(dev));
522 if (!dn) {
523 dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
524 return -ENODEV;
525 diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
526 index d108eeb..6bf7bcd 100644
527 --- a/arch/sparc64/kernel/sys_sparc.c
528 +++ b/arch/sparc64/kernel/sys_sparc.c
529 @@ -319,7 +319,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
530
531 if (flags & MAP_FIXED) {
532 /* Ok, don't mess with it. */
533 - return get_unmapped_area(NULL, addr, len, pgoff, flags);
534 + return get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
535 }
536 flags &= ~MAP_SHARED;
537
538 diff --git a/arch/sparc64/lib/xor.S b/arch/sparc64/lib/xor.S
539 index a79c888..f44f58f 100644
540 --- a/arch/sparc64/lib/xor.S
541 +++ b/arch/sparc64/lib/xor.S
542 @@ -491,12 +491,12 @@ xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
543 ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */
544 xor %g2, %i4, %g2
545 xor %g3, %i5, %g3
546 - ldda [%i7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */
547 + ldda [%l7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */
548 xor %l0, %g2, %l0
549 xor %l1, %g3, %l1
550 stxa %l0, [%i0 + 0x00] %asi
551 stxa %l1, [%i0 + 0x08] %asi
552 - ldda [%i6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */
553 + ldda [%l6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */
554 ldda [%i0 + 0x10] %asi, %l0 /* %l0/%l1 = dest + 0x10 */
555
556 xor %i4, %i2, %i4
557 @@ -504,12 +504,12 @@ xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
558 ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */
559 xor %g2, %i4, %g2
560 xor %g3, %i5, %g3
561 - ldda [%i7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */
562 + ldda [%l7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */
563 xor %l0, %g2, %l0
564 xor %l1, %g3, %l1
565 stxa %l0, [%i0 + 0x10] %asi
566 stxa %l1, [%i0 + 0x18] %asi
567 - ldda [%i6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */
568 + ldda [%l6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */
569 ldda [%i0 + 0x20] %asi, %l0 /* %l0/%l1 = dest + 0x20 */
570
571 xor %i4, %i2, %i4
572 @@ -517,12 +517,12 @@ xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
573 ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */
574 xor %g2, %i4, %g2
575 xor %g3, %i5, %g3
576 - ldda [%i7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */
577 + ldda [%l7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */
578 xor %l0, %g2, %l0
579 xor %l1, %g3, %l1
580 stxa %l0, [%i0 + 0x20] %asi
581 stxa %l1, [%i0 + 0x28] %asi
582 - ldda [%i6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */
583 + ldda [%l6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */
584 ldda [%i0 + 0x30] %asi, %l0 /* %l0/%l1 = dest + 0x30 */
585
586 prefetch [%i1 + 0x40], #one_read
587 diff --git a/arch/um/Makefile b/arch/um/Makefile
588 index 989224f..c3a399e 100644
589 --- a/arch/um/Makefile
590 +++ b/arch/um/Makefile
591 @@ -60,7 +60,8 @@ SYS_DIR := $(ARCH_DIR)/include/sysdep-$(SUBARCH)
592
593 CFLAGS += $(CFLAGS-y) -D__arch_um__ -DSUBARCH=\"$(SUBARCH)\" \
594 $(ARCH_INCLUDE) $(MODE_INCLUDE) -Dvmap=kernel_vmap \
595 - -Din6addr_loopback=kernel_in6addr_loopback
596 + -Din6addr_loopback=kernel_in6addr_loopback \
597 + -Din6addr_any=kernel_in6addr_any
598
599 AFLAGS += $(ARCH_INCLUDE)
600
601 diff --git a/arch/um/include/common-offsets.h b/arch/um/include/common-offsets.h
602 index 6eee343..2378ff4 100644
603 --- a/arch/um/include/common-offsets.h
604 +++ b/arch/um/include/common-offsets.h
605 @@ -10,6 +10,7 @@ OFFSET(HOST_TASK_PID, task_struct, pid);
606
607 DEFINE(UM_KERN_PAGE_SIZE, PAGE_SIZE);
608 DEFINE(UM_KERN_PAGE_MASK, PAGE_MASK);
609 +DEFINE(UM_KERN_PAGE_SHIFT, PAGE_SHIFT);
610 DEFINE(UM_NSEC_PER_SEC, NSEC_PER_SEC);
611
612 DEFINE_STR(UM_KERN_EMERG, KERN_EMERG);
613 diff --git a/arch/um/include/sysdep-i386/stub.h b/arch/um/include/sysdep-i386/stub.h
614 index 4fffae7..19c85f3 100644
615 --- a/arch/um/include/sysdep-i386/stub.h
616 +++ b/arch/um/include/sysdep-i386/stub.h
617 @@ -9,7 +9,6 @@
618 #include <sys/mman.h>
619 #include <asm/ptrace.h>
620 #include <asm/unistd.h>
621 -#include <asm/page.h>
622 #include "stub-data.h"
623 #include "kern_constants.h"
624 #include "uml-config.h"
625 @@ -19,7 +18,7 @@ extern void stub_clone_handler(void);
626
627 #define STUB_SYSCALL_RET EAX
628 #define STUB_MMAP_NR __NR_mmap2
629 -#define MMAP_OFFSET(o) ((o) >> PAGE_SHIFT)
630 +#define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT)
631
632 static inline long stub_syscall0(long syscall)
633 {
634 diff --git a/arch/um/kernel/skas/clone.c b/arch/um/kernel/skas/clone.c
635 index 47b812b..885a125 100644
636 --- a/arch/um/kernel/skas/clone.c
637 +++ b/arch/um/kernel/skas/clone.c
638 @@ -3,7 +3,6 @@
639 #include <sys/mman.h>
640 #include <sys/time.h>
641 #include <asm/unistd.h>
642 -#include <asm/page.h>
643 #include "ptrace_user.h"
644 #include "skas.h"
645 #include "stub-data.h"
646 diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
647 index e85f499..919c25b 100644
648 --- a/arch/um/os-Linux/main.c
649 +++ b/arch/um/os-Linux/main.c
650 @@ -12,7 +12,6 @@
651 #include <sys/resource.h>
652 #include <sys/mman.h>
653 #include <sys/user.h>
654 -#include <asm/page.h>
655 #include "kern_util.h"
656 #include "as-layout.h"
657 #include "mem_user.h"
658 diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c
659 index 0f7df4e..9fbf210 100644
660 --- a/arch/um/os-Linux/skas/mem.c
661 +++ b/arch/um/os-Linux/skas/mem.c
662 @@ -9,7 +9,6 @@
663 #include <unistd.h>
664 #include <sys/mman.h>
665 #include <sys/wait.h>
666 -#include <asm/page.h>
667 #include <asm/unistd.h>
668 #include "mem_user.h"
669 #include "mem.h"
670 diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
671 index ba9af8d..607d2b8 100644
672 --- a/arch/um/os-Linux/skas/process.c
673 +++ b/arch/um/os-Linux/skas/process.c
674 @@ -182,7 +182,7 @@ static int userspace_tramp(void *stack)
675
676 ptrace(PTRACE_TRACEME, 0, 0, 0);
677
678 - init_new_thread_signals();
679 + signal(SIGTERM, SIG_DFL);
680 err = set_interval(1);
681 if(err)
682 panic("userspace_tramp - setting timer failed, errno = %d\n",
683 diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
684 index 46f6139..f4f2981 100644
685 --- a/arch/um/os-Linux/start_up.c
686 +++ b/arch/um/os-Linux/start_up.c
687 @@ -19,7 +19,6 @@
688 #include <sys/mman.h>
689 #include <sys/resource.h>
690 #include <asm/unistd.h>
691 -#include <asm/page.h>
692 #include <sys/types.h>
693 #include "kern_util.h"
694 #include "user.h"
695 diff --git a/arch/um/os-Linux/tt.c b/arch/um/os-Linux/tt.c
696 index bcf9359..5dc113d 100644
697 --- a/arch/um/os-Linux/tt.c
698 +++ b/arch/um/os-Linux/tt.c
699 @@ -17,7 +17,6 @@
700 #include <sys/mman.h>
701 #include <asm/ptrace.h>
702 #include <asm/unistd.h>
703 -#include <asm/page.h>
704 #include "kern_util.h"
705 #include "user.h"
706 #include "signal_kern.h"
707 diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c
708 index 7cbcf48..ef09543 100644
709 --- a/arch/um/os-Linux/util.c
710 +++ b/arch/um/os-Linux/util.c
711 @@ -105,6 +105,44 @@ int setjmp_wrapper(void (*proc)(void *, void *), ...)
712
713 void os_dump_core(void)
714 {
715 + int pid;
716 +
717 signal(SIGSEGV, SIG_DFL);
718 +
719 + /*
720 + * We are about to SIGTERM this entire process group to ensure that
721 + * nothing is around to run after the kernel exits. The
722 + * kernel wants to abort, not die through SIGTERM, so we
723 + * ignore it here.
724 + */
725 +
726 + signal(SIGTERM, SIG_IGN);
727 + kill(0, SIGTERM);
728 + /*
729 + * Most of the other processes associated with this UML are
730 + * likely sTopped, so give them a SIGCONT so they see the
731 + * SIGTERM.
732 + */
733 + kill(0, SIGCONT);
734 +
735 + /*
736 + * Now, having sent signals to everyone but us, make sure they
737 + * die by ptrace. Processes can survive what's been done to
738 + * them so far - the mechanism I understand is receiving a
739 + * SIGSEGV and segfaulting immediately upon return. There is
740 + * always a SIGSEGV pending, and (I'm guessing) signals are
741 + * processed in numeric order so the SIGTERM (signal 15 vs
742 + * SIGSEGV being signal 11) is never handled.
743 + *
744 + * Run a waitpid loop until we get some kind of error.
745 + * Hopefully, it's ECHILD, but there's not a lot we can do if
746 + * it's something else. Tell os_kill_ptraced_process not to
747 + * wait for the child to report its death because there's
748 + * nothing reasonable to do if that fails.
749 + */
750 +
751 + while ((pid = waitpid(-1, NULL, WNOHANG)) > 0)
752 + os_kill_ptraced_process(pid, 0);
753 +
754 abort();
755 }
756 diff --git a/arch/um/sys-i386/user-offsets.c b/arch/um/sys-i386/user-offsets.c
757 index 29118cf..5142415 100644
758 --- a/arch/um/sys-i386/user-offsets.c
759 +++ b/arch/um/sys-i386/user-offsets.c
760 @@ -2,9 +2,9 @@
761 #include <stddef.h>
762 #include <signal.h>
763 #include <sys/poll.h>
764 +#include <sys/user.h>
765 #include <sys/mman.h>
766 #include <asm/ptrace.h>
767 -#include <asm/user.h>
768
769 #define DEFINE(sym, val) \
770 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
771 @@ -48,8 +48,8 @@ void foo(void)
772 OFFSET(HOST_SC_FP_ST, _fpstate, _st);
773 OFFSET(HOST_SC_FXSR_ENV, _fpstate, _fxsr_env);
774
775 - DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_i387_struct));
776 - DEFINE_LONGS(HOST_XFP_SIZE, sizeof(struct user_fxsr_struct));
777 + DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_fpregs_struct));
778 + DEFINE_LONGS(HOST_XFP_SIZE, sizeof(struct user_fpxregs_struct));
779
780 DEFINE(HOST_IP, EIP);
781 DEFINE(HOST_SP, UESP);
782 diff --git a/arch/um/sys-x86_64/user-offsets.c b/arch/um/sys-x86_64/user-offsets.c
783 index 0d5fd76..f1ef2a8 100644
784 --- a/arch/um/sys-x86_64/user-offsets.c
785 +++ b/arch/um/sys-x86_64/user-offsets.c
786 @@ -3,17 +3,10 @@
787 #include <signal.h>
788 #include <sys/poll.h>
789 #include <sys/mman.h>
790 +#include <sys/user.h>
791 #define __FRAME_OFFSETS
792 #include <asm/ptrace.h>
793 #include <asm/types.h>
794 -/* For some reason, x86_64 defines u64 and u32 only in <pci/types.h>, which I
795 - * refuse to include here, even though they're used throughout the headers.
796 - * These are used in asm/user.h, and that include can't be avoided because of
797 - * the sizeof(struct user_regs_struct) below.
798 - */
799 -typedef __u64 u64;
800 -typedef __u32 u32;
801 -#include <asm/user.h>
802
803 #define DEFINE(sym, val) \
804 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
805 diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
806 index 458893b..e2d6bad 100644
807 --- a/arch/x86_64/mm/init.c
808 +++ b/arch/x86_64/mm/init.c
809 @@ -734,12 +734,6 @@ int in_gate_area_no_task(unsigned long addr)
810 return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
811 }
812
813 -void * __init alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
814 -{
815 - return __alloc_bootmem_core(pgdat->bdata, size,
816 - SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0);
817 -}
818 -
819 const char *arch_vma_name(struct vm_area_struct *vma)
820 {
821 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
822 diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
823 index 10b9809..0416ffb 100644
824 --- a/arch/x86_64/mm/pageattr.c
825 +++ b/arch/x86_64/mm/pageattr.c
826 @@ -229,9 +229,14 @@ void global_flush_tlb(void)
827 struct page *pg, *next;
828 struct list_head l;
829
830 - down_read(&init_mm.mmap_sem);
831 + /*
832 + * Write-protect the semaphore, to exclude two contexts
833 + * doing a list_replace_init() call in parallel and to
834 + * exclude new additions to the deferred_pages list:
835 + */
836 + down_write(&init_mm.mmap_sem);
837 list_replace_init(&deferred_pages, &l);
838 - up_read(&init_mm.mmap_sem);
839 + up_write(&init_mm.mmap_sem);
840
841 flush_map(&l);
842
843 diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
844 index b0f0e58..be9e65b 100644
845 --- a/fs/xfs/linux-2.6/xfs_buf.c
846 +++ b/fs/xfs/linux-2.6/xfs_buf.c
847 @@ -187,6 +187,19 @@ free_address(
848 {
849 a_list_t *aentry;
850
851 +#ifdef CONFIG_XEN
852 + /*
853 + * Xen needs to be able to make sure it can get an exclusive
854 + * RO mapping of pages it wants to turn into a pagetable. If
855 + * a newly allocated page is also still being vmap()ed by xfs,
856 + * it will cause pagetable construction to fail. This is a
857 + * quick workaround to always eagerly unmap pages so that Xen
858 + * is happy.
859 + */
860 + vunmap(addr);
861 + return;
862 +#endif
863 +
864 aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
865 if (likely(aentry)) {
866 spin_lock(&as_lock);
867 diff --git a/include/asm-mips/hazards.h b/include/asm-mips/hazards.h
868 index 6a5fa32..684f622 100644
869 --- a/include/asm-mips/hazards.h
870 +++ b/include/asm-mips/hazards.h
871 @@ -10,11 +10,12 @@
872 #ifndef _ASM_HAZARDS_H
873 #define _ASM_HAZARDS_H
874
875 -
876 #ifdef __ASSEMBLY__
877 #define ASMMACRO(name, code...) .macro name; code; .endm
878 #else
879
880 +#include <asm/cpu-features.h>
881 +
882 #define ASMMACRO(name, code...) \
883 __asm__(".macro " #name "; " #code "; .endm"); \
884 \
885 @@ -86,6 +87,57 @@ do { \
886 : "=r" (tmp)); \
887 } while (0)
888
889 +#elif defined(CONFIG_CPU_MIPSR1)
890 +
891 +/*
892 + * These are slightly complicated by the fact that we guarantee R1 kernels to
893 + * run fine on R2 processors.
894 + */
895 +ASMMACRO(mtc0_tlbw_hazard,
896 + _ssnop; _ssnop; _ehb
897 + )
898 +ASMMACRO(tlbw_use_hazard,
899 + _ssnop; _ssnop; _ssnop; _ehb
900 + )
901 +ASMMACRO(tlb_probe_hazard,
902 + _ssnop; _ssnop; _ssnop; _ehb
903 + )
904 +ASMMACRO(irq_enable_hazard,
905 + _ssnop; _ssnop; _ssnop; _ehb
906 + )
907 +ASMMACRO(irq_disable_hazard,
908 + _ssnop; _ssnop; _ssnop; _ehb
909 + )
910 +ASMMACRO(back_to_back_c0_hazard,
911 + _ssnop; _ssnop; _ssnop; _ehb
912 + )
913 +/*
914 + * gcc has a tradition of misscompiling the previous construct using the
915 + * address of a label as argument to inline assembler. Gas otoh has the
916 + * annoying difference between la and dla which are only usable for 32-bit
917 + * rsp. 64-bit code, so can't be used without conditional compilation.
918 + * The alterantive is switching the assembler to 64-bit code which happens
919 + * to work right even for 32-bit code ...
920 + */
921 +#define __instruction_hazard() \
922 +do { \
923 + unsigned long tmp; \
924 + \
925 + __asm__ __volatile__( \
926 + " .set mips64r2 \n" \
927 + " dla %0, 1f \n" \
928 + " jr.hb %0 \n" \
929 + " .set mips0 \n" \
930 + "1: \n" \
931 + : "=r" (tmp)); \
932 +} while (0)
933 +
934 +#define instruction_hazard() \
935 +do { \
936 + if (cpu_has_mips_r2) \
937 + __instruction_hazard(); \
938 +} while (0)
939 +
940 #elif defined(CONFIG_CPU_R10000)
941
942 /*
943 diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
944 index c83534e..0365ec9 100644
945 --- a/include/linux/bootmem.h
946 +++ b/include/linux/bootmem.h
947 @@ -59,7 +59,6 @@ extern void *__alloc_bootmem_core(struct bootmem_data *bdata,
948 unsigned long align,
949 unsigned long goal,
950 unsigned long limit);
951 -extern void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size);
952
953 #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
954 extern void reserve_bootmem(unsigned long addr, unsigned long size);
955 diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h
956 index ff61ea3..b05d8a6 100644
957 --- a/include/xen/interface/vcpu.h
958 +++ b/include/xen/interface/vcpu.h
959 @@ -160,8 +160,9 @@ struct vcpu_set_singleshot_timer {
960 */
961 #define VCPUOP_register_vcpu_info 10 /* arg == struct vcpu_info */
962 struct vcpu_register_vcpu_info {
963 - uint32_t mfn; /* mfn of page to place vcpu_info */
964 - uint32_t offset; /* offset within page */
965 + uint64_t mfn; /* mfn of page to place vcpu_info */
966 + uint32_t offset; /* offset within page */
967 + uint32_t rsvd; /* unused */
968 };
969
970 #endif /* __XEN_PUBLIC_VCPU_H__ */
971 diff --git a/mm/sparse.c b/mm/sparse.c
972 index 239f5a7..1facdff 100644
973 --- a/mm/sparse.c
974 +++ b/mm/sparse.c
975 @@ -215,12 +215,6 @@ static int __meminit sparse_init_one_section(struct mem_section *ms,
976 return 1;
977 }
978
979 -__attribute__((weak)) __init
980 -void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
981 -{
982 - return NULL;
983 -}
984 -
985 static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
986 {
987 struct page *map;
988 @@ -231,11 +225,6 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
989 if (map)
990 return map;
991
992 - map = alloc_bootmem_high_node(NODE_DATA(nid),
993 - sizeof(struct page) * PAGES_PER_SECTION);
994 - if (map)
995 - return map;
996 -
997 map = alloc_bootmem_node(NODE_DATA(nid),
998 sizeof(struct page) * PAGES_PER_SECTION);
999 if (map)