Contents of /alx-src/tags/kernel26-2.6.12-alx-r9/mm/rmap.c
Revision 630
Wed Mar 4 11:03:09 2009 UTC (15 years, 6 months ago) by niro
File MIME type: text/plain
File size: 23219 byte(s)
Tag kernel26-2.6.12-alx-r9
/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_sem	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem
 *
 * When a page fault occurs in writing from user to file, down_read
 * of mmap_sem nests within i_sem; in sys_msync, i_sem nests within
 * down_read of mmap_sem; i_sem and down_write of mmap_sem are never
 * taken together; in truncation, i_sem is taken outermost.
 *
 * mm->mmap_sem
 *   page->flags PG_locked (lock_page)
 *     mapping->i_mmap_lock
 *       anon_vma->lock
 *         mm->page_table_lock
 *           zone->lru_lock (in mark_page_accessed)
 *           swap_list_lock (in swap_free etc's swap_info_get)
 *             mmlist_lock (in mmput, drain_mmlist and others)
 *             swap_device_lock (in swap_duplicate, swap_info_get)
 *           mapping->private_lock (in __set_page_dirty_buffers)
 *           inode_lock (in set_page_dirty's __mark_inode_dirty)
 *             sb_lock (within inode_lock in fs/fs-writeback.c)
 *             mapping->tree_lock (widely used, in set_page_dirty,
 *                                 in arch-dependent flush_dcache_mmap_lock,
 *                                 within inode_lock in __sync_single_inode)
 */
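
/*
 * Editor's sketch (not part of the original file): a path that needs
 * several of the locks above must acquire them in the listed order,
 * for example
 *
 *	down_read(&mm->mmap_sem);
 *	lock_page(page);
 *	spin_lock(&mapping->i_mmap_lock);
 *	spin_lock(&mm->page_table_lock);
 *	... operate on the ptes ...
 *	spin_unlock(&mm->page_table_lock);
 *	spin_unlock(&mapping->i_mmap_lock);
 *	unlock_page(page);
 *	up_read(&mm->mmap_sem);
 *
 * Only the acquisition order matters for deadlock avoidance; the
 * release order is free.
 */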

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>

#include <asm/tlbflush.h>

//#define RMAP_DEBUG /* can be enabled only for debugging */

kmem_cache_t *anon_vma_cachep;

static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef RMAP_DEBUG
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;
	int found = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
		if (vma == find_vma)
			found = 1;
	}
	BUG_ON(!found);
#endif
}

/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}
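
/*
 * Editor's sketch (illustrative, not the kernel's actual fault code):
 * a caller in the anonymous-fault path, with mmap_sem already held for
 * read, uses anon_vma_prepare() before installing a new anonymous page:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_page_vma(GFP_HIGHUSER, vma, address);
 *	... map the page into the pte ...
 *	page_add_anon_rmap(page, vma, address);
 */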

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
						SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races;
 * anon_vma_cachep is SLAB_DESTROY_BY_RCU (see anon_vma_init), so inside
 * rcu_read_lock the memory can at worst be recycled as another
 * anon_vma, never freed outright, and taking its spinlock stays safe.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
out:
	rcu_read_unlock();
	return anon_vma;
}

/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		return -EFAULT;
	}
	return address;
}
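
/*
 * Editor's note (worked example, not in the original file): with 4K
 * pages, so PAGE_CACHE_SHIFT == PAGE_SHIFT == 12, a page with index 3
 * in a vma with vm_pgoff 1 and vm_start 0x08048000 is expected at
 * 0x08048000 + ((3 - 1) << 12) = 0x0804a000.
 */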

/*
 * At what user virtual address is page expected in vma? checking that the
 * page matches the vma: currently only used by unuse_process, on anon pages.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with mapped pte and locked mm->page_table_lock.
 */
static pte_t *page_check_address(struct page *page, struct mm_struct *mm,
				 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * We need the page_table_lock to protect us from page faults,
	 * munmap, fork, etc...
	 */
	spin_lock(&mm->page_table_lock);
	pgd = pgd_offset(mm, address);
	if (likely(pgd_present(*pgd))) {
		pud = pud_offset(pgd, address);
		if (likely(pud_present(*pud))) {
			pmd = pmd_offset(pud, address);
			if (likely(pmd_present(*pmd))) {
				pte = pte_offset_map(pmd, address);
				if (likely(pte_present(*pte) &&
					   page_to_pfn(page) == pte_pfn(*pte)))
					return pte;
				pte_unmap(pte);
			}
		}
	}
	spin_unlock(&mm->page_table_lock);
	return ERR_PTR(-ENOENT);
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount, int ignore_token)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	int referenced = 0;

	if (!get_mm_counter(mm, rss))
		goto out;
	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address);
	if (!IS_ERR(pte)) {
		if (ptep_clear_flush_young(vma, address, pte))
			referenced++;

		if (mm != current->mm && !ignore_token && has_swap_token(mm))
			referenced++;

		(*mapcount)--;
		pte_unmap(pte);
		spin_unlock(&mm->page_table_lock);
	}
out:
	return referenced;
}

static int page_referenced_anon(struct page *page, int ignore_token)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount,
							ignore_token);
		if (!mapcount)
			break;
	}
	spin_unlock(&anon_vma->lock);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag. This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds. It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page, int ignore_token)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount,
							ignore_token);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked, int ignore_token)
{
	int referenced = 0;

	if (!swap_token_default_timeout)
		ignore_token = 1;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page, ignore_token);
		else if (is_locked)
			referenced += page_referenced_file(page, ignore_token);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page,
								ignore_token);
			unlock_page(page);
		}
	}
	return referenced;
}
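
/*
 * Editor's sketch (assumed caller, not part of this file): the page
 * reclaim scanner typically feeds the returned count into its
 * keep-or-reclaim decision, along the lines of
 *
 *	referenced = page_referenced(page, 1, ignore_token);
 *	if (referenced)
 *		keep the page on the active list;
 *	else
 *		treat the page as a reclaim candidate;
 */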

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the mm->page_table_lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	pgoff_t index;

	BUG_ON(PageReserved(page));
	BUG_ON(!anon_vma);

	inc_mm_counter(vma->vm_mm, anon_rss);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	index = (address - vma->vm_start) >> PAGE_SHIFT;
	index += vma->vm_pgoff;
	index >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;

	if (atomic_inc_and_test(&page->_mapcount)) {
		page->index = index;
		page->mapping = (struct address_space *) anon_vma;
		inc_page_state(nr_mapped);
	}
	/* else checking page index and mapping is racy */
}
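
/*
 * Editor's note (not in the original file): the index computed above is
 * linear_page_index() open-coded - the inverse of vma_address().
 * Recording it lets vma_address() later reconstruct the user virtual
 * address of this page from any vma on the anon_vma list.
 */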

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the mm->page_table_lock.
 */
void page_add_file_rmap(struct page *page)
{
	BUG_ON(PageAnon(page));
	if (!pfn_valid(page_to_pfn(page)) || PageReserved(page))
		return;

	if (atomic_inc_and_test(&page->_mapcount))
		inc_page_state(nr_mapped);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * Caller needs to hold the mm->page_table_lock.
 */
void page_remove_rmap(struct page *page)
{
	BUG_ON(PageReserved(page));

	if (atomic_add_negative(-1, &page->_mapcount)) {
		BUG_ON(page_mapcount(page) < 0);
		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_and_clear_dirty(page))
			set_page_dirty(page);
		dec_page_state(nr_mapped);
	}
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	int ret = SWAP_AGAIN;

	if (!get_mm_counter(mm, rss))
		goto out;
	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address);
	if (IS_ERR(pte))
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
			ptep_clear_flush_young(vma, address, pte)) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/*
	 * Don't pull an anonymous page out from under get_user_pages.
	 * GUP carefully breaks COW and raises page count (while holding
	 * page_table_lock, as we have here) to make sure that the page
	 * cannot be freed. If we unmap that page here, a user write
	 * access to the virtual address will bring back the page, but
	 * its raised count will (ironically) be taken to mean it's not
	 * an exclusive swap page, do_wp_page will replace it by a copy
	 * page, and the user never gets to see the data GUP was holding
	 * the original page for.
	 *
	 * This test is also useful for when swapoff (unuse_process) has
	 * to drop page lock: its reference to the page stops existing
	 * ptes from being unmapped, so swapoff can make progress.
	 */
	if (PageSwapCache(page) &&
	    page_count(page) != page_mapcount(page) + 2) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page->private };
		/*
		 * Store the swap location in the pte.
		 * See handle_pte_fault() ...
		 */
		BUG_ON(!PageSwapCache(page));
		swap_duplicate(entry);
		if (list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_add(&mm->mmlist, &init_mm.mmlist);
			spin_unlock(&mmlist_lock);
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
		dec_mm_counter(mm, anon_rss);
	}

	dec_mm_counter(mm, rss);
	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap(pte);
	spin_unlock(&mm->page_table_lock);
out:
	return ret;
}
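
/*
 * Editor's sketch (assumption about the fault-in side, not part of this
 * file): once the swap entry has been stored in the pte above, a later
 * fault on that address recovers the page roughly as
 *
 *	swp_entry_t entry = pte_to_swp_entry(orig_pte);
 *	page = lookup_swap_cache(entry);
 *
 * see do_swap_page() in mm/memory.c.
 */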

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
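
/*
 * Editor's note (worked example, not in the original file): with 4K
 * pages, 32*PAGE_SIZE is 128K; on i386 without PAE, PMD_SIZE is 4M, so
 * CLUSTER_SIZE is 128K and each cluster pass scans 32 ptes.  Capping at
 * PMD_SIZE keeps an aligned cluster within one pte page, so a single
 * pte_offset_map() covers the whole loop below.
 */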

static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, *original_pte;
	pte_t pteval;
	struct page *page;
	unsigned long address;
	unsigned long end;
	unsigned long pfn;

	/*
	 * We need the page_table_lock to protect us from page faults,
	 * munmap, fork, etc...
	 */
	spin_lock(&mm->page_table_lock);

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out_unlock;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out_unlock;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto out_unlock;

	for (original_pte = pte = pte_offset_map(pmd, address);
			address < end; pte++, address += PAGE_SIZE) {

		if (!pte_present(*pte))
			continue;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		BUG_ON(PageAnon(page));
		if (PageReserved(page))
			continue;

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pfn);
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, rss);
		(*mapcount)--;
	}

	pte_unmap(original_pte);
out_unlock:
	spin_unlock(&mm->page_table_lock);
}

static int try_to_unmap_anon(struct page *page)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}
	spin_unlock(&anon_vma->lock);
	return ret;
}

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway. Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (get_mm_counter(vma->vm_mm, rss) &&
				cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (!(vma->vm_flags & VM_RESERVED))
			vma->vm_private_data = NULL;
	}
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page)
{
	int ret;

	BUG_ON(PageReserved(page));
	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page);
	else
		ret = try_to_unmap_file(page);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
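
/*
 * Editor's sketch (illustrative, not the actual pageout code): a caller
 * in the pageout path dispatches on the return value along these lines:
 *
 *	switch (try_to_unmap(page)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_SUCCESS:
 *		break;	(all mappings gone: try to free the page)
 *	}
 */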