Annotation of /trunk/kernel26-xen/patches-2.6.25-r1/1028-2.6.25-xen-patch-2.6.25-rc6-rc7.patch
Parent Directory | Revision Log
Revision 609 -
(hide annotations)
(download)
Fri May 23 17:35:37 2008 UTC (16 years, 4 months ago) by niro
File size: 9339 byte(s)
-using opensuse xen patchset, updated kernel configs
1 | niro | 609 | From: kernel.org |
2 | Subject: 2.6.25-rc7 | ||
3 | |||
4 | ## Automatically generated incremental diff | ||
5 | ## From: linux-2.6.25-rc6 | ||
6 | ## To: linux-2.6.25-rc7 | ||
7 | ## Robot: $Id: 1028-2.6.25-xen-patch-2.6.25-rc6-rc7.patch,v 1.1 2008-05-23 17:35:36 niro Exp $ | ||
8 | |||
9 | Automatically created from "patches.kernel.org/patch-2.6.25-rc6-rc7" by xen-port-patches.py | ||
10 | Acked-by: jbeulich@novell.com | ||
11 | |||
12 | Index: head-2008-04-02/arch/x86/kernel/e820_32-xen.c | ||
13 | =================================================================== | ||
14 | --- head-2008-04-02.orig/arch/x86/kernel/e820_32-xen.c 2008-04-14 11:00:03.000000000 +0200 | ||
15 | +++ head-2008-04-02/arch/x86/kernel/e820_32-xen.c 2008-04-14 11:00:10.000000000 +0200 | ||
16 | @@ -833,6 +833,33 @@ static int __init parse_memmap(char *arg | ||
17 | early_param("memmap", parse_memmap); | ||
18 | |||
19 | #ifndef CONFIG_XEN | ||
20 | +void __init update_memory_range(u64 start, u64 size, unsigned old_type, | ||
21 | + unsigned new_type) | ||
22 | +{ | ||
23 | + int i; | ||
24 | + | ||
25 | + BUG_ON(old_type == new_type); | ||
26 | + | ||
27 | + for (i = 0; i < e820.nr_map; i++) { | ||
28 | + struct e820entry *ei = &e820.map[i]; | ||
29 | + u64 final_start, final_end; | ||
30 | + if (ei->type != old_type) | ||
31 | + continue; | ||
32 | + /* totally covered? */ | ||
33 | + if (ei->addr >= start && ei->size <= size) { | ||
34 | + ei->type = new_type; | ||
35 | + continue; | ||
36 | + } | ||
37 | + /* partially covered */ | ||
38 | + final_start = max(start, ei->addr); | ||
39 | + final_end = min(start + size, ei->addr + ei->size); | ||
40 | + if (final_start >= final_end) | ||
41 | + continue; | ||
42 | + add_memory_region(final_start, final_end - final_start, | ||
43 | + new_type); | ||
44 | + } | ||
45 | +} | ||
46 | + | ||
47 | void __init update_e820(void) | ||
48 | { | ||
49 | u8 nr_map; | ||
50 | Index: head-2008-04-02/arch/x86/kernel/e820_64-xen.c | ||
51 | =================================================================== | ||
52 | --- head-2008-04-02.orig/arch/x86/kernel/e820_64-xen.c 2008-04-14 11:00:03.000000000 +0200 | ||
53 | +++ head-2008-04-02/arch/x86/kernel/e820_64-xen.c 2008-04-02 15:00:51.000000000 +0200 | ||
54 | @@ -831,6 +831,33 @@ void __init finish_e820_parsing(void) | ||
55 | } | ||
56 | |||
57 | #ifndef CONFIG_XEN | ||
58 | +void __init update_memory_range(u64 start, u64 size, unsigned old_type, | ||
59 | + unsigned new_type) | ||
60 | +{ | ||
61 | + int i; | ||
62 | + | ||
63 | + BUG_ON(old_type == new_type); | ||
64 | + | ||
65 | + for (i = 0; i < e820.nr_map; i++) { | ||
66 | + struct e820entry *ei = &e820.map[i]; | ||
67 | + u64 final_start, final_end; | ||
68 | + if (ei->type != old_type) | ||
69 | + continue; | ||
70 | + /* totally covered? */ | ||
71 | + if (ei->addr >= start && ei->size <= size) { | ||
72 | + ei->type = new_type; | ||
73 | + continue; | ||
74 | + } | ||
75 | + /* partially covered */ | ||
76 | + final_start = max(start, ei->addr); | ||
77 | + final_end = min(start + size, ei->addr + ei->size); | ||
78 | + if (final_start >= final_end) | ||
79 | + continue; | ||
80 | + add_memory_region(final_start, final_end - final_start, | ||
81 | + new_type); | ||
82 | + } | ||
83 | +} | ||
84 | + | ||
85 | void __init update_e820(void) | ||
86 | { | ||
87 | u8 nr_map; | ||
88 | Index: head-2008-04-02/arch/x86/kernel/quirks-xen.c | ||
89 | =================================================================== | ||
90 | --- head-2008-04-02.orig/arch/x86/kernel/quirks-xen.c 2008-04-14 11:00:03.000000000 +0200 | ||
91 | +++ head-2008-04-02/arch/x86/kernel/quirks-xen.c 2008-04-02 14:58:58.000000000 +0200 | ||
92 | @@ -361,6 +361,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_N | ||
93 | nvidia_force_enable_hpet); | ||
94 | |||
95 | /* LPC bridges */ | ||
96 | +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260, | ||
97 | + nvidia_force_enable_hpet); | ||
98 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360, | ||
99 | nvidia_force_enable_hpet); | ||
100 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361, | ||
101 | Index: head-2008-04-02/arch/x86/kernel/setup64-xen.c | ||
102 | =================================================================== | ||
103 | --- head-2008-04-02.orig/arch/x86/kernel/setup64-xen.c 2008-04-14 11:00:03.000000000 +0200 | ||
104 | +++ head-2008-04-02/arch/x86/kernel/setup64-xen.c 2008-04-02 14:58:58.000000000 +0200 | ||
105 | @@ -153,14 +153,16 @@ void __init setup_per_cpu_areas(void) | ||
106 | printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size); | ||
107 | for_each_cpu_mask (i, cpu_possible_map) { | ||
108 | char *ptr; | ||
109 | +#ifndef CONFIG_NEED_MULTIPLE_NODES | ||
110 | + ptr = alloc_bootmem_pages(size); | ||
111 | +#else | ||
112 | + int node = early_cpu_to_node(i); | ||
113 | |||
114 | - if (!NODE_DATA(early_cpu_to_node(i))) { | ||
115 | - printk("cpu with no node %d, num_online_nodes %d\n", | ||
116 | - i, num_online_nodes()); | ||
117 | + if (!node_online(node) || !NODE_DATA(node)) | ||
118 | ptr = alloc_bootmem_pages(size); | ||
119 | - } else { | ||
120 | - ptr = alloc_bootmem_pages_node(NODE_DATA(early_cpu_to_node(i)), size); | ||
121 | - } | ||
122 | + else | ||
123 | + ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); | ||
124 | +#endif | ||
125 | if (!ptr) | ||
126 | panic("Cannot allocate cpu data for CPU %d\n", i); | ||
127 | cpu_pda(i)->data_offset = ptr - __per_cpu_start; | ||
128 | Index: head-2008-04-02/arch/x86/mm/ioremap-xen.c | ||
129 | =================================================================== | ||
130 | --- head-2008-04-02.orig/arch/x86/mm/ioremap-xen.c 2008-04-14 11:00:03.000000000 +0200 | ||
131 | +++ head-2008-04-02/arch/x86/mm/ioremap-xen.c 2008-04-02 15:58:11.000000000 +0200 | ||
132 | @@ -262,7 +262,7 @@ static int ioremap_change_attr(unsigned | ||
133 | * have to convert them into an offset in a page-aligned mapping, but the | ||
134 | * caller shouldn't need to know that small detail. | ||
135 | */ | ||
136 | -static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, | ||
137 | +static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, | ||
138 | enum ioremap_mode mode) | ||
139 | { | ||
140 | unsigned long mfn, offset, last_addr, vaddr; | ||
141 | @@ -359,13 +359,13 @@ static void __iomem *__ioremap(unsigned | ||
142 | * | ||
143 | * Must be freed with iounmap. | ||
144 | */ | ||
145 | -void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size) | ||
146 | +void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) | ||
147 | { | ||
148 | return __ioremap(phys_addr, size, IOR_MODE_UNCACHED); | ||
149 | } | ||
150 | EXPORT_SYMBOL(ioremap_nocache); | ||
151 | |||
152 | -void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size) | ||
153 | +void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) | ||
154 | { | ||
155 | return __ioremap(phys_addr, size, IOR_MODE_CACHED); | ||
156 | } | ||
157 | Index: head-2008-04-02/include/asm-x86/mach-xen/asm/e820_64.h | ||
158 | =================================================================== | ||
159 | --- head-2008-04-02.orig/include/asm-x86/mach-xen/asm/e820_64.h 2008-04-14 11:00:03.000000000 +0200 | ||
160 | +++ head-2008-04-02/include/asm-x86/mach-xen/asm/e820_64.h 2008-04-02 14:58:58.000000000 +0200 | ||
161 | @@ -18,6 +18,8 @@ extern unsigned long find_e820_area(unsi | ||
162 | unsigned size, unsigned long align); | ||
163 | extern void add_memory_region(unsigned long start, unsigned long size, | ||
164 | int type); | ||
165 | +extern void update_memory_range(u64 start, u64 size, unsigned old_type, | ||
166 | + unsigned new_type); | ||
167 | extern void setup_memory_region(void); | ||
168 | extern void contig_e820_setup(void); | ||
169 | extern unsigned long e820_end_of_ram(void); | ||
170 | Index: head-2008-04-02/include/asm-x86/mach-xen/asm/io_32.h | ||
171 | =================================================================== | ||
172 | --- head-2008-04-02.orig/include/asm-x86/mach-xen/asm/io_32.h 2008-04-14 11:00:03.000000000 +0200 | ||
173 | +++ head-2008-04-02/include/asm-x86/mach-xen/asm/io_32.h 2008-04-02 14:58:58.000000000 +0200 | ||
174 | @@ -127,13 +127,13 @@ static inline void * phys_to_virt(unsign | ||
175 | * If the area you are trying to map is a PCI BAR you should have a | ||
176 | * look at pci_iomap(). | ||
177 | */ | ||
178 | -extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size); | ||
179 | -extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size); | ||
180 | +extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); | ||
181 | +extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); | ||
182 | |||
183 | /* | ||
184 | * The default ioremap() behavior is non-cached: | ||
185 | */ | ||
186 | -static inline void __iomem *ioremap(unsigned long offset, unsigned long size) | ||
187 | +static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) | ||
188 | { | ||
189 | return ioremap_nocache(offset, size); | ||
190 | } | ||
191 | Index: head-2008-04-02/include/asm-x86/mach-xen/asm/io_64.h | ||
192 | =================================================================== | ||
193 | --- head-2008-04-02.orig/include/asm-x86/mach-xen/asm/io_64.h 2008-04-14 11:00:03.000000000 +0200 | ||
194 | +++ head-2008-04-02/include/asm-x86/mach-xen/asm/io_64.h 2008-04-02 14:58:58.000000000 +0200 | ||
195 | @@ -178,13 +178,13 @@ extern void early_iounmap(void *addr, un | ||
196 | * it's useful if some control registers are in such an area and write combining | ||
197 | * or read caching is not desirable: | ||
198 | */ | ||
199 | -extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size); | ||
200 | -extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size); | ||
201 | +extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); | ||
202 | +extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); | ||
203 | |||
204 | /* | ||
205 | * The default ioremap() behavior is non-cached: | ||
206 | */ | ||
207 | -static inline void __iomem *ioremap(unsigned long offset, unsigned long size) | ||
208 | +static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) | ||
209 | { | ||
210 | return ioremap_nocache(offset, size); | ||
211 | } | ||
212 | Index: head-2008-04-02/include/asm-x86/mach-xen/asm/page.h | ||
213 | =================================================================== | ||
214 | --- head-2008-04-02.orig/include/asm-x86/mach-xen/asm/page.h 2008-04-14 11:00:03.000000000 +0200 | ||
215 | +++ head-2008-04-02/include/asm-x86/mach-xen/asm/page.h 2008-04-02 15:53:32.000000000 +0200 | ||
216 | @@ -62,13 +62,13 @@ extern int page_is_ram(unsigned long pag | ||
217 | |||
218 | struct page; | ||
219 | |||
220 | -static void inline clear_user_page(void *page, unsigned long vaddr, | ||
221 | +static inline void clear_user_page(void *page, unsigned long vaddr, | ||
222 | struct page *pg) | ||
223 | { | ||
224 | clear_page(page); | ||
225 | } | ||
226 | |||
227 | -static void inline copy_user_page(void *to, void *from, unsigned long vaddr, | ||
228 | +static inline void copy_user_page(void *to, void *from, unsigned long vaddr, | ||
229 | struct page *topage) | ||
230 | { | ||
231 | copy_page(to, from); |