Contents of /trunk/kernel26-xen/patches-2.6.25-r1/1045-2.6.25-xen-Some-xen-asm.S-x86_64-code.patch
Parent Directory | Revision Log
Revision 606 -
(show annotations)
(download)
Thu May 22 23:13:13 2008 UTC (15 years, 11 months ago) by niro
File size: 3658 byte(s)
-ver bump to 2.6.25-magellan-r1: - linux-2.6.25.4 - fbcondecor-0.9.4 - squashfs-3.3 - unionfs-2.3.3 - tuxonice-3.0-rc7 - linux-phc-0.3.0 - acpi-dstd-0.9a - reiser4 - xen-3.2.0 . ipw3945-1.2.2
1 | From e48008fb1c7ecc9686da52875c1687485a0cd613 Mon Sep 17 00:00:00 2001 |
2 | From: Eduardo Habkost <ehabkost@redhat.com> |
3 | Date: Thu, 29 Nov 2007 16:06:30 -0200 |
4 | Subject: [PATCH] Some xen-asm.S x86_64 code |
5 | |
6 | Funny preprocessor tricks to make the same code work on both arches |
7 | included. |
8 | |
9 | Signed-off-by: Eduardo Habkost <ehabkost@redhat.com> |
10 | --- |
11 | arch/x86/xen/xen-asm.S | 55 +++++++++++++++++++++++++++++++++++++++++++++++- |
12 | 1 files changed, 54 insertions(+), 1 deletions(-) |
13 | |
14 | diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S |
15 | index 6b71904..af6abfa 100644 |
16 | --- a/arch/x86/xen/xen-asm.S |
17 | +++ b/arch/x86/xen/xen-asm.S |
18 | @@ -24,6 +24,28 @@ |
19 | #define RELOC(x, v) .globl x##_reloc; x##_reloc=v |
20 | #define ENDPATCH(x) .globl x##_end; x##_end=. |
21 | |
22 | +#ifdef CONFIG_X86_64 |
23 | +# define SUFFIX q |
24 | +# define REGPREF r |
25 | +#else |
26 | +# define SUFFIX l |
27 | +# define REGPREF e |
28 | +#endif |
29 | + |
30 | +#define __REG(pref, reg) %pref##reg |
31 | +#define _REG(pref, reg) __REG(pref, reg) |
32 | +#define REG(reg) _REG(REGPREF, reg) |
33 | + |
34 | +#define __INSN(in, suff) in##suff |
35 | +#define _INSN(in, suff) __INSN(in, suff) |
36 | +#define INSN(in) _INSN(in, SUFFIX) |
37 | + |
38 | +#define rAX REG(ax) |
39 | +#define rSP REG(sp) |
40 | +#define MOV INSN(mov) |
41 | +#define AND INSN(and) |
42 | + |
43 | + |
44 | /* Pseudo-flag used for virtual NMI, which we don't implement yet */ |
45 | #define XEN_EFLAGS_NMI 0x80000000 |
46 | |
47 | @@ -34,14 +56,24 @@ |
48 | */ |
49 | ENTRY(xen_irq_enable_direct) |
50 | /* Unmask events */ |
51 | +#ifdef PER_CPU_VAR |
52 | movb $0, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask |
53 | +#else |
54 | + PER_CPU(xen_vcpu_info, rAX) |
55 | + movb $0, XEN_vcpu_info_mask(rAX) |
56 | +#endif |
57 | |
58 | /* Preempt here doesn't matter because that will deal with |
59 | any pending interrupts. The pending check may end up being |
60 | run on the wrong CPU, but that doesn't hurt. */ |
61 | |
62 | /* Test for pending */ |
63 | +#ifdef PER_CPU_VAR |
64 | testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending |
65 | +#else |
66 | + /* rAX already points to xen_vcpu_info */ |
67 | + testb $0xff, XEN_vcpu_info_pending(rAX) |
68 | +#endif |
69 | jz 1f |
70 | |
71 | 2: call check_events |
72 | @@ -57,7 +89,12 @@ ENDPATCH(xen_irq_enable_direct) |
73 | non-zero. |
74 | */ |
75 | ENTRY(xen_irq_disable_direct) |
76 | +#ifdef PER_CPU_VAR |
77 | movb $1, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask |
78 | +#else |
79 | + PER_CPU(xen_vcpu_info, rAX) |
80 | + movb $1, XEN_vcpu_info_mask(rAX) |
81 | +#endif |
82 | ENDPATCH(xen_irq_disable_direct) |
83 | ret |
84 | ENDPROC(xen_irq_disable_direct) |
85 | @@ -73,7 +110,12 @@ ENDPATCH(xen_irq_disable_direct) |
86 | Xen and x86 use opposite senses (mask vs enable). |
87 | */ |
88 | ENTRY(xen_save_fl_direct) |
89 | +#ifdef PER_CPU_VAR |
90 | testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask |
91 | +#else |
92 | + PER_CPU(xen_vcpu_info, rAX) |
93 | + testb $0xff, XEN_vcpu_info_mask(rAX) |
94 | +#endif |
95 | setz %ah |
96 | addb %ah,%ah |
97 | ENDPATCH(xen_save_fl_direct) |
98 | @@ -92,13 +134,23 @@ ENDPATCH(xen_save_fl_direct) |
99 | */ |
100 | ENTRY(xen_restore_fl_direct) |
101 | testb $X86_EFLAGS_IF>>8, %ah |
102 | +#ifdef PER_CPU_VAR |
103 | setz PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask |
104 | +#else |
105 | + PER_CPU(xen_vcpu_info, rAX) |
106 | + setz XEN_vcpu_info_mask(rAX) |
107 | +#endif |
108 | /* Preempt here doesn't matter because that will deal with |
109 | any pending interrupts. The pending check may end up being |
110 | run on the wrong CPU, but that doesn't hurt. */ |
111 | |
112 | /* check for unmasked and pending */ |
113 | +#ifdef PER_CPU_VAR |
114 | cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending |
115 | +#else |
116 | + /* rAX already points to xen_vcpu_info */ |
117 | + cmpw $0x0001, XEN_vcpu_info_pending(rAX) |
118 | +#endif |
119 | jz 1f |
120 | 2: call check_events |
121 | 1: |
122 | @@ -143,7 +195,8 @@ ENDPATCH(xen_restore_fl_direct) |
123 | */ |
124 | ENTRY(xen_iret_direct) |
125 | /* test eflags for special cases */ |
126 | - testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp) |
127 | + /*FIXME: use right offset for rFLAGS */ |
128 | + testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(rSP) |
129 | jnz hyper_iret |
130 | |
131 | push %eax |
132 | -- |
133 | 1.5.4.1 |
134 |