Magellan Linux

Contents of /trunk/kernel26-xen/patches-2.6.25-r1/1128-2.6.25-xen-Trying-to-organize-the-xen-bootmem-allocation-mess.patch

Parent Directory | Revision Log


Revision 606 - (show annotations) (download)
Thu May 22 23:13:13 2008 UTC (16 years ago) by niro
File size: 6659 byte(s)
-ver bump to 2.6.25-magellan-r1:
- linux-2.6.25.4
- fbcondecor-0.9.4
- squashfs-3.3
- unionfs-2.3.3
- tuxonice-3.0-rc7
- linux-phc-0.3.0
- acpi-dstd-0.9a
- reiser4
- xen-3.2.0
- ipw3945-1.2.2

1 From cc3a5330ad96089d6e72ed3dbb7e9f261a4127b3 Mon Sep 17 00:00:00 2001
2 From: Eduardo Habkost <ehabkost@redhat.com>
3 Date: Wed, 23 Jan 2008 18:51:58 -0200
4 Subject: [PATCH] Trying to organize the xen bootmem allocation mess (REVIEWME)
5
6 See comments added to code to understand this.
7
8 Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
9 ---
10 arch/x86/mm/init_64.c | 47 +++++++++++++++++++++++++++++++++------------
11 arch/x86/xen/enlighten.c | 15 +++++++++++++-
12 arch/x86/xen/init.h | 2 +-
13 3 files changed, 49 insertions(+), 15 deletions(-)
14
15 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
16 index e151e93..e1a071f 100644
17 --- a/arch/x86/mm/init_64.c
18 +++ b/arch/x86/mm/init_64.c
19 @@ -275,9 +275,9 @@ static __init void *spp_getpage(void)
20
21 if (after_bootmem)
22 ptr = (void *) get_zeroed_page(GFP_ATOMIC);
23 - else if (start_pfn < table_end) {
24 - ptr = __va(start_pfn << PAGE_SHIFT);
25 - start_pfn++;
26 + else if (xen_alloc_pfn < table_end) {
27 + ptr = __va(xen_alloc_pfn << PAGE_SHIFT);
28 + xen_alloc_pfn++;
29 memset(ptr, 0, PAGE_SIZE);
30 } else
31 ptr = alloc_bootmem_pages(PAGE_SIZE);
32 @@ -410,7 +410,7 @@ native_set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
33
34 /*FIXME: this code should be enabled at runtime, somehow */
35
36 -unsigned long start_pfn;
37 +unsigned long xen_alloc_pfn;
38
39 static __meminit void *alloc_low_page(unsigned long *phys)
40 {
41 @@ -423,7 +423,7 @@ static __meminit void *alloc_low_page(unsigned long *phys)
42 return adr;
43 }
44
45 - pfn = start_pfn++;
46 + pfn = xen_alloc_pfn++;
47 *phys = pfn << PAGE_SHIFT;
48
49 /* The address returned by __va() is not available yet.
50 @@ -456,7 +456,7 @@ int make_readonly(unsigned long paddr)
51 /* Make old page tables read-only. */
52 if (!xen_feature(XENFEAT_writable_page_tables)
53 && (paddr >= (xen_start_info->pt_base - __START_KERNEL_map))
54 - && (paddr < (start_pfn << PAGE_SHIFT)))
55 + && (paddr < (xen_alloc_pfn << PAGE_SHIFT)))
56 readonly = 1;
57
58 /*
59 @@ -516,7 +516,7 @@ static void xen_finish_init_mapping(void)
60 WARN_ON(HYPERVISOR_update_va_mapping(
61 start, __pte_ma(0), 0));
62
63 - /* Allocate pte's for initial fixmaps from 'start_pfn' allocator. */
64 + /* Allocate pte's for initial fixmaps from 'xen_alloc_pfn' allocator. */
65 table_end = ~0UL;
66
67
68 @@ -551,8 +551,8 @@ static void xen_finish_init_mapping(void)
69 >> PAGE_SHIFT,
70 PAGE_KERNEL_RO));
71
72 - /* Disable the 'start_pfn' allocator. */
73 - table_end = start_pfn;
74 + /* Disable the 'xen_alloc_pfn' allocator. */
75 + table_end = xen_alloc_pfn;
76
77 xprintk("finished!\n");
78 }
79 @@ -578,7 +578,7 @@ static void __init xen_extend_init_mapping(unsigned long tables_space)
80
81 /* Ensure init mappings cover kernel text/data and initial tables. */
82 while (va < (__START_KERNEL_map
83 - + (start_pfn << PAGE_SHIFT)
84 + + (xen_alloc_pfn << PAGE_SHIFT)
85 + tables_space)) {
86 pmd = (pmd_t *)&page[pmd_index(va)];
87 if (pmd_none(*pmd)) {
88 @@ -753,6 +753,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
89 */
90 if (make_readonly(address))
91 attrs &= ~_PAGE_RW;
92 +
93 __set_pte(pte, __pte(address | attrs));
94 }
95 pte = pte_save;
96 @@ -833,10 +834,10 @@ static void __init find_early_table_space(unsigned long end)
97 /*FIXME: what does this do? */
98 xen_extend_init_mapping(tables);
99
100 - table_start = start_pfn;
101 + table_start = xen_alloc_pfn;
102 table_end = table_start + (tables>>PAGE_SHIFT);
103
104 - early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
105 + printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
106 end, table_start << PAGE_SHIFT,
107 (table_start << PAGE_SHIFT) + tables);
108 }
109 @@ -917,16 +918,36 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
110
111 if (!after_bootmem) {
112 #ifdef CONFIG_XEN
113 - BUG_ON(start_pfn != table_end);
114 + BUG_ON(xen_alloc_pfn != table_end);
115 xen_finish_init_mapping();
116 #endif
117 mmu_cr4_features = read_cr4();
118 }
119 __flush_tlb_all();
120
121 + /*FIXME: Xen early allocation is messy-messy-messy:
122 + *
123 + * - memory from spp_getpage() needs to be reserved
124 + * - memory from alloc_low_page() needs to be reserved
125 + * - memory from spp_getpage() is being reserved on
126 + * xen_pagetable_setup_start(), using xen_alloc_pfn and
127 + * xen_start_pfn
128 + * - memory from alloc_low_page() should be reserved here,
129 + * like on non-Xen. But it is already being reserved
130 + * with the spp_getpage() memory. This is why there is
131 + * a #ifndef below
132 + *
133 + * - Probably keeping spp_getpage() as-is would work,
134 + * but first I need to understand how the non-Xen version
135 + * of it works and why XS upstream has different code
136 + * - If that works, we may remove lots of xen-specific
137 + * bootmem reservation
138 + */
139 +#ifndef CONFIG_XEN
140 if (!after_bootmem)
141 reserve_early(table_start << PAGE_SHIFT,
142 table_end << PAGE_SHIFT, "PGTABLE");
143 +#endif
144 }
145
146 #ifndef CONFIG_NUMA
147 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
148 index eb43fac..fc2f956 100644
149 --- a/arch/x86/xen/enlighten.c
150 +++ b/arch/x86/xen/enlighten.c
151 @@ -898,6 +898,11 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
152 xen_set_pte(ptep, pte);
153 }
154
155 +
156 +#ifdef CONFIG_X86_64
157 +static unsigned long xen_start_pfn;
158 +#endif
159 +
160 static __init void xen_pagetable_setup_start(pgd_t *base)
161 {
162 #ifdef CONFIG_X86_32
163 @@ -1011,6 +1016,12 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
164 pin_pagetable_pfn(level, PFN_DOWN(__pa(base)));
165 }
166 #endif
167 +
168 +#ifdef CONFIG_X86_64
169 + reserve_bootmem(xen_start_pfn << PAGE_SHIFT,
170 + (xen_alloc_pfn - xen_start_pfn) << PAGE_SHIFT,
171 + BOOTMEM_DEFAULT);
172 +#endif
173 }
174
175 /* This is called once we have the cpu_possible_map */
176 @@ -1362,6 +1373,8 @@ static void __init xen_reserve_top(void)
177 #define xen_reserve_top() do { } while (0)
178 #endif
179
180 +
181 +
182 /* First C function to be called on Xen boot */
183 asmlinkage void __init xen_start_kernel(void)
184 {
185 @@ -1420,7 +1433,7 @@ asmlinkage void __init xen_start_kernel(void)
186
187 #ifdef CONFIG_X86_64
188 /* used by alloc_low_page() */
189 - start_pfn = PFN_UP(__pa_symbol(xen_start_info->pt_base)) + xen_start_info->nr_pt_frames;
190 + xen_start_pfn = xen_alloc_pfn = PFN_UP(__pa_symbol(xen_start_info->pt_base)) + xen_start_info->nr_pt_frames;
191 #endif
192
193 #ifdef CONFIG_X86_32
194 diff --git a/arch/x86/xen/init.h b/arch/x86/xen/init.h
195 index fee0c05..6d91d3e 100644
196 --- a/arch/x86/xen/init.h
197 +++ b/arch/x86/xen/init.h
198 @@ -6,7 +6,7 @@ void xen_init_pt(void);
199 extern pud_t level3_user_pgt[512];
200 extern pgd_t init_level4_user_pgt[];
201
202 -extern unsigned long start_pfn;
203 +extern unsigned long xen_alloc_pfn;
204
205 void early_make_page_readonly(void *va, unsigned int feature);
206
207 --
208 1.5.4.1
209