Contents of /alx-src/tags/kernel26-2.6.12-alx-r9/include/asm-um/pgtable.h
Revision 630 - Wed Mar 4 11:03:09 2009 UTC by niro
File MIME type: text/plain
File size: 10563 byte(s)
Tag kernel26-2.6.12-alx-r9
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include "linux/sched.h"
#include "linux/linkage.h"
#include "asm/processor.h"
#include "asm/page.h"
#include "asm/fixmap.h"

#define _PAGE_PRESENT	0x001
#define _PAGE_NEWPAGE	0x002
#define _PAGE_NEWPROT	0x004
#define _PAGE_FILE	0x008	/* set:pagecache unset:swap */
#define _PAGE_PROTNONE	0x010	/* If not present */
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
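
/*
 * Editorial note, not in the original header: _PAGE_NEWPAGE and
 * _PAGE_NEWPROT appear to be UML-specific software bits - they seem to
 * mark ptes whose backing host mapping still has to be (re)created
 * with mmap() or have its protection refreshed with mprotect() the
 * next time the address space is brought up to date.
 */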

#ifdef CONFIG_3_LEVEL_PGTABLES
#include "asm/pgtable-3level.h"
#else
#include "asm/pgtable-2level.h"
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

extern void *um_virt_to_phys(struct task_struct *task, unsigned long virt,
			     pte_t *pte_out);

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

#define pgtable_cache_init() do ; while (0)

/*
 * pgd entries used up by user/kernel:
 */

#define USER_PGD_PTRS (TASK_SIZE >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#ifndef __ASSEMBLY__
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after
 * the physical memory until the kernel virtual memory starts.  That
 * means that any out-of-bounds memory accesses will hopefully be
 * caught.  The vmalloc() routines leave a hole of 4kB between each
 * vmalloced area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START	((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
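
/*
 * Illustrative arithmetic, not in the original header: taking the 8MB
 * (0x800000) value mentioned above for VMALLOC_OFFSET and, say,
 * end_iomem == 0x20003000, then
 *
 *	VMALLOC_START == (0x20003000 + 0x800000) & ~0x7fffff == 0x20800000
 *
 * i.e. the vmalloc area is rounded up to the next VMALLOC_OFFSET
 * boundary, leaving the "hole" after physical memory described above.
 */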

#define REGION_SHIFT	(sizeof(pte_t) * 8 - 4)
#define REGION_MASK	(((unsigned long) 0xf) << REGION_SHIFT)

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)

/*
 * The i386 can't do page protection for execute, and considers that
 * the same as read.  Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
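
/*
 * Sketch of how the tables above are consumed (an assumption based on
 * the generic mm code of this era, not stated in this header): mm/mmap.c
 * builds protection_map[] from __P000..__P111 and __S000..__S111,
 * indexed by the PROT_READ/PROT_WRITE/PROT_EXEC bits, with __Pxxx used
 * for private mappings and __Sxxx for shared ones.  E.g. a private
 * PROT_READ|PROT_WRITE mapping selects __P011 == PAGE_COPY, so the page
 * is mapped read-only until a write fault triggers copy-on-write.
 */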

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_VERIFY_AREA

/* page table for 0-4MB for everybody */
extern unsigned long pg0[1024];

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */

#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR	(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void*)-1))

/* sizeof(void*) == 1 << SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware! SRB. */
#define SIZEOF_PTR_LOG2	3

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
	((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)	(!(pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)	(pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)	(pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_address(x) (__va(pte_val(x) & PAGE_MASK))
#define mk_phys(a, r) ((a) + (((unsigned long) r) << REGION_SHIFT))
#define phys_addr(p) ((p) & ~REGION_MASK)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_read(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_exec(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_write(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_RW)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_FILE);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
	return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
}

static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_USER);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_exprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_USER);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_set_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkexec(pte_t pte)
{
	pte_set_bits(pte, _PAGE_USER);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_set_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEWPAGE);
	if(pte_present(pte))
		pte_clear_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

extern phys_t page_to_phys(struct page *page);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

extern pte_t mk_pte(struct page *page, pgprot_t pgprot);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	if(pte_present(pte))
		pte = pte_mknewpage(pte_mknewprot(pte));
	return pte;
}
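
/*
 * Hypothetical usage sketch, not in the original header: mprotect-style
 * code changes a pte's protection while keeping the page frame number
 * and the accessed/dirty state, e.g.
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *
 * _PAGE_CHG_MASK above is exactly the set of bits that survives the
 * change.
 */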

#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

#define pgd_index_k(addr) pgd_index(addr)

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
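
/*
 * Illustrative sketch, not part of the original header: a software walk
 * from a kernel virtual address to its pte slot using the macros above.
 * It assumes the 2.6.12-era helpers pud_offset() (from the asm-generic
 * pgtable-nopud.h included below) and pmd_offset() (from the level
 * header included above), so in the real file it would have to live
 * after those includes.
 */
static inline pte_t *um_pte_lookup_example(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);	/* entry in init_mm's pgd */
	pud_t *pud = pud_offset(pgd, address);	/* folded level under nopud */
	pmd_t *pmd = pmd_offset(pud, address);	/* entry in the pmd page */

	if (!pmd_present(*pmd))			/* no pte page mapped here */
		return NULL;
	return pte_offset_kernel(pmd, address);	/* pte slot for address */
}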

#define update_mmu_cache(vma,address,pte) do ; while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)		(((x).val >> 4) & 0x3f)
#define __swp_offset(x)		((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 4) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
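
/*
 * Worked example, not in the original header: __swp_entry(5, 0x100)
 * yields (5 << 4) | (0x100 << 11) == 0x80050, from which __swp_type()
 * recovers (0x80050 >> 4) & 0x3f == 5 and __swp_offset() recovers
 * 0x80050 >> 11 == 0x100.  Bits 0-3 are left clear, so a swap pte can
 * never be mistaken for a present (_PAGE_PRESENT) or file (_PAGE_FILE)
 * entry.
 */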

#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

#include <asm-generic/pgtable-nopud.h>

#endif	/* !__ASSEMBLY__ */
#endif	/* __UM_PGTABLE_H */

extern struct page *phys_to_page(const unsigned long phys);
extern struct page *__virt_to_page(const unsigned long virt);
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */