Annotation of /trunk/kernel26-magellan/patches-2.6.21-r13/0011-2.6.21-mm-swap_prefetch-35.patch
Revision 319
Sun Aug 19 18:14:21 2007 UTC by niro
File size: 30790 byte(s)
-2.6.21-magellan-r13

Implement swap prefetching when the vm is relatively idle and there is free
ram available. The code is based on some preliminary code by Thomas
Schlichter.

This stores swapped entries in a list ordered by most recent use, backed by
a radix tree for fast lookup (the relationship is sketched below). It
creates a low priority kernel thread running at nice 19 to do the
prefetching at a later stage.

Once pages have been added to the swapped list, a timer is started that
tests every 5 seconds for conditions suitable for prefetching swap pages.
Suitable conditions are defined as no pages being swapped in or out, and no
watermark tests failing. Significant amounts of dirtied ram and changes in
free ram representing disk writes or reads also prevent prefetching.

It then checks that there is spare ram, looking for at least 3 * pages_high
free pages per zone, and if that succeeds it prefetches pages from swap
into the swap cache. The pages are added to the tail of the inactive list
to preserve LRU ordering.

Pages are prefetched until the list is empty or the vm is seen as busy
according to the previously described criteria. On numa, the node id is
stored with each entry, and an appropriate zonelist based on it is used
when allocating ram.

The pages are copied to swap cache and kept on backing store. This allows
pressure on either physical ram or swap to readily find free pages without
further I/O.

Prefetching can be enabled/disabled via the tunable
/proc/sys/vm/swap_prefetch, initially set to 1 (enabled); example usage is
shown below.

Enabling laptop_mode disables swap prefetching to prevent unnecessary
spin-ups.

In testing on modern pc hardware, this speeds up wall-clock activation of
the firefox browser 5-fold after a worst case complete swap-out of the
browser on a static web page.

From: Ingo Molnar <mingo@elte.hu>

Fix potential swap-prefetch deadlock, found by the locking correctness
validator.

Signed-off-by: Con Kolivas <kernel@kolivas.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>

 Documentation/sysctl/vm.txt   |   12
 include/linux/mm_inline.h     |    7
 include/linux/swap-prefetch.h |   55 +++
 include/linux/swap.h          |    2
 init/Kconfig                  |   22 +
 kernel/sysctl.c               |   11
 mm/Makefile                   |    1
 mm/swap.c                     |   48 +++
 mm/swap_prefetch.c            |  581 ++++++++++++++++++++++++++++++++++++++++++
 mm/swap_state.c               |   11
 mm/vmscan.c                   |    6
 11 files changed, 755 insertions(+), 1 deletion(-)

Index: linux-2.6.21-ck2/Documentation/sysctl/vm.txt
===================================================================
--- linux-2.6.21-ck2.orig/Documentation/sysctl/vm.txt	2007-05-14 19:49:19.000000000 +1000
+++ linux-2.6.21-ck2/Documentation/sysctl/vm.txt	2007-05-14 19:49:55.000000000 +1000
@@ -31,6 +31,7 @@ Currently, these files are in /proc/sys/
 - min_unmapped_ratio
 - min_slab_ratio
 - panic_on_oom
+- swap_prefetch
 
 ==============================================================
 
@@ -205,3 +206,14 @@ rather than killing rogue processes, set
 
 The default value is 0.
 
+==============================================================
+
+swap_prefetch
+
+This enables or disables the swap prefetching feature. When the virtual
+memory subsystem has been extremely idle for at least 5 seconds it will start
+copying back pages from swap into the swapcache and keep a copy in swap. In
+practice it can take many minutes before the vm is idle enough.
+
+The default value is 1.
+
Index: linux-2.6.21-ck2/include/linux/swap.h
===================================================================
--- linux-2.6.21-ck2.orig/include/linux/swap.h	2007-05-14 19:49:19.000000000 +1000
+++ linux-2.6.21-ck2/include/linux/swap.h	2007-05-14 19:49:55.000000000 +1000
@@ -180,6 +180,7 @@ extern unsigned int nr_free_pagecache_pa
 /* linux/mm/swap.c */
 extern void FASTCALL(lru_cache_add(struct page *));
 extern void FASTCALL(lru_cache_add_active(struct page *));
+extern void FASTCALL(lru_cache_add_tail(struct page *));
 extern void FASTCALL(activate_page(struct page *));
 extern void FASTCALL(mark_page_accessed(struct page *));
 extern void lru_add_drain(void);
@@ -237,6 +238,7 @@ extern void free_pages_and_swap_cache(st
 extern struct page * lookup_swap_cache(swp_entry_t);
 extern struct page * read_swap_cache_async(swp_entry_t, struct vm_area_struct *vma,
 			unsigned long addr);
+extern int add_to_swap_cache(struct page *page, swp_entry_t entry);
 /* linux/mm/swapfile.c */
 extern long total_swap_pages;
 extern unsigned int nr_swapfiles;
Index: linux-2.6.21-ck2/init/Kconfig
===================================================================
--- linux-2.6.21-ck2.orig/init/Kconfig	2007-05-14 19:49:19.000000000 +1000
+++ linux-2.6.21-ck2/init/Kconfig	2007-05-14 19:49:55.000000000 +1000
@@ -101,6 +101,28 @@ config SWAP
	  used to provide more virtual memory than the actual RAM present
	  in your computer. If unsure say Y.
 
+config SWAP_PREFETCH
+	bool "Support for prefetching swapped memory"
+	depends on SWAP
+	default y
+	---help---
+	  This option will allow the kernel to prefetch swapped memory pages
+	  when idle. The pages will be kept on both swap and in swap_cache
+	  thus avoiding the need for further I/O if either ram or swap space
+	  is required.
+
+	  What this will do on workstations is slowly bring back applications
+	  that have swapped out after memory intensive workloads back into
+	  physical ram if you have free ram at a later stage and the machine
+	  is relatively idle. This means that when you come back to your
+	  computer after leaving it idle for a while, applications will come
+	  to life faster. Note that your swap usage will appear to increase
+	  but these are cached pages, can be dropped freely by the vm, and it
+	  should stabilise around 50% swap usage maximum.
+
+	  Workstations and multiuser workstation servers will most likely want
+	  to say Y.
+
 config SYSVIPC
	bool "System V IPC"
	---help---
Index: linux-2.6.21-ck2/kernel/sysctl.c
===================================================================
--- linux-2.6.21-ck2.orig/kernel/sysctl.c	2007-05-14 19:49:19.000000000 +1000
+++ linux-2.6.21-ck2/kernel/sysctl.c	2007-05-14 19:49:55.000000000 +1000
@@ -22,6 +22,7 @@
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/slab.h>
+#include <linux/swap-prefetch.h>
 #include <linux/sysctl.h>
 #include <linux/proc_fs.h>
 #include <linux/capability.h>
@@ -906,6 +907,16 @@ static ctl_table vm_table[] = {
		.extra1		= &zero,
	},
 #endif
+#ifdef CONFIG_SWAP_PREFETCH
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "swap_prefetch",
+		.data		= &swap_prefetch,
+		.maxlen		= sizeof(swap_prefetch),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
	{ .ctl_name = 0 }
 };
 
Index: linux-2.6.21-ck2/mm/Makefile
===================================================================
--- linux-2.6.21-ck2.orig/mm/Makefile	2007-05-14 19:49:19.000000000 +1000
+++ linux-2.6.21-ck2/mm/Makefile	2007-05-14 19:49:55.000000000 +1000
@@ -17,6 +17,7 @@ ifeq ($(CONFIG_MMU)$(CONFIG_BLOCK),yy)
 obj-y += bounce.o
 endif
 obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
+obj-$(CONFIG_SWAP_PREFETCH) += swap_prefetch.o
 obj-$(CONFIG_HUGETLBFS) += hugetlb.o
 obj-$(CONFIG_NUMA) += mempolicy.o
 obj-$(CONFIG_SPARSEMEM) += sparse.o
Index: linux-2.6.21-ck2/mm/swap.c
===================================================================
--- linux-2.6.21-ck2.orig/mm/swap.c	2007-05-14 19:49:19.000000000 +1000
+++ linux-2.6.21-ck2/mm/swap.c	2007-05-14 19:49:55.000000000 +1000
@@ -17,6 +17,7 @@
 #include <linux/sched.h>
 #include <linux/kernel_stat.h>
 #include <linux/swap.h>
+#include <linux/swap-prefetch.h>
 #include <linux/mman.h>
 #include <linux/pagemap.h>
 #include <linux/pagevec.h>
@@ -176,6 +177,7 @@ EXPORT_SYMBOL(mark_page_accessed);
  */
 static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
 static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };
+static DEFINE_PER_CPU(struct pagevec, lru_add_tail_pvecs) = { 0, };
 
 void fastcall lru_cache_add(struct page *page)
 {
@@ -197,6 +199,31 @@ void fastcall lru_cache_add_active(struc
	put_cpu_var(lru_add_active_pvecs);
 }
 
+static void __pagevec_lru_add_tail(struct pagevec *pvec)
+{
+	int i;
+	struct zone *zone = NULL;
+
+	for (i = 0; i < pagevec_count(pvec); i++) {
+		struct page *page = pvec->pages[i];
+		struct zone *pagezone = page_zone(page);
+
+		if (pagezone != zone) {
+			if (zone)
+				spin_unlock_irq(&zone->lru_lock);
+			zone = pagezone;
+			spin_lock_irq(&zone->lru_lock);
+		}
+		BUG_ON(PageLRU(page));
+		SetPageLRU(page);
+		add_page_to_inactive_list_tail(zone, page);
+	}
+	if (zone)
+		spin_unlock_irq(&zone->lru_lock);
+	release_pages(pvec->pages, pvec->nr, pvec->cold);
+	pagevec_reinit(pvec);
+}
+
 static void __lru_add_drain(int cpu)
 {
	struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);
@@ -207,6 +234,9 @@ static void __lru_add_drain(int cpu)
	pvec = &per_cpu(lru_add_active_pvecs, cpu);
	if (pagevec_count(pvec))
		__pagevec_lru_add_active(pvec);
+	pvec = &per_cpu(lru_add_tail_pvecs, cpu);
+	if (pagevec_count(pvec))
+		__pagevec_lru_add_tail(pvec);
 }
 
 void lru_add_drain(void)
@@ -403,6 +433,21 @@ void __pagevec_lru_add_active(struct pag
 }
 
 /*
+ * Function used uniquely to put pages back to the lru at the end of the
+ * inactive list to preserve the lru order. Currently only used by swap
+ * prefetch.
+ */
+void fastcall lru_cache_add_tail(struct page *page)
+{
+	struct pagevec *pvec = &get_cpu_var(lru_add_tail_pvecs);
+
+	page_cache_get(page);
+	if (!pagevec_add(pvec, page))
+		__pagevec_lru_add_tail(pvec);
+	put_cpu_var(lru_add_tail_pvecs);
+}
+
+/*
  * Try to drop buffers from the pages in a pagevec
  */
 void pagevec_strip(struct pagevec *pvec)
@@ -514,6 +559,9 @@ void __init swap_setup(void)
	 * Right now other parts of the system means that we
	 * _really_ don't want to cluster much more
	 */
+
+	prepare_swap_prefetch();
+
 #ifdef CONFIG_HOTPLUG_CPU
	hotcpu_notifier(cpu_swap_callback, 0);
 #endif
Index: linux-2.6.21-ck2/mm/swap_prefetch.c
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.21-ck2/mm/swap_prefetch.c	2007-05-14 19:49:55.000000000 +1000
@@ -0,0 +1,581 @@
+/*
+ * linux/mm/swap_prefetch.c
+ *
+ * Copyright (C) 2005-2006 Con Kolivas
+ *
+ * Written by Con Kolivas <kernel@kolivas.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/swap-prefetch.h>
+#include <linux/ioprio.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+#include <linux/syscalls.h>
+#include <linux/writeback.h>
+#include <linux/vmstat.h>
+#include <linux/freezer.h>
+
+/*
+ * Time to delay prefetching if vm is busy or prefetching unsuccessful. There
+ * needs to be at least this duration of idle time, meaning in practice it
+ * can be much longer.
+ */
+#define PREFETCH_DELAY	(HZ * 5)
+
+/* sysctl - enable/disable swap prefetching */
+int swap_prefetch __read_mostly = 1;
+
+struct swapped_root {
+	unsigned long busy;			/* vm busy */
+	spinlock_t lock;			/* protects all data */
+	struct list_head list;			/* MRU list of swapped pages */
+	struct radix_tree_root swap_tree;	/* Lookup tree of pages */
+	unsigned int count;			/* Number of entries */
+	unsigned int maxcount;			/* Maximum entries allowed */
+	struct kmem_cache *cache;		/* Of struct swapped_entry */
+};
+
+static struct swapped_root swapped = {
+	.lock		= SPIN_LOCK_UNLOCKED,
+	.list		= LIST_HEAD_INIT(swapped.list),
+	.swap_tree	= RADIX_TREE_INIT(GFP_ATOMIC),
+};
+
+static struct task_struct *kprefetchd_task;
+
+/*
+ * Busy vm paths call this to flag the vm as busy. Setting the busy bit will
+ * interrupt trickle_swap and make kprefetchd wait another PREFETCH_DELAY.
+ * Purposefully racy.
+ */
+inline void delay_swap_prefetch(void)
+{
+	if (!test_bit(0, &swapped.busy))
+		__set_bit(0, &swapped.busy);
+}
+
+/*
+ * Drop behind accounting which keeps a list of the most recently used swap
+ * entries.
+ */
+void add_to_swapped_list(struct page *page)
+{
+	struct swapped_entry *entry;
+	unsigned long index, flags;
+	int wakeup;
+
+	if (!swap_prefetch)
+		return;
+
+	wakeup = 0;
+
+	spin_lock_irqsave(&swapped.lock, flags);
+	if (swapped.count >= swapped.maxcount) {
+		/*
+		 * We limit the number of entries to 2/3 of physical ram.
+		 * Once the number of entries exceeds this we start removing
+		 * the least recently used entries.
+		 */
+		entry = list_entry(swapped.list.next,
+			struct swapped_entry, swapped_list);
+		radix_tree_delete(&swapped.swap_tree, entry->swp_entry.val);
+		list_del(&entry->swapped_list);
+		swapped.count--;
+	} else {
+		entry = kmem_cache_alloc(swapped.cache, GFP_ATOMIC);
+		if (unlikely(!entry))
+			/* bad, can't allocate more mem */
+			goto out_locked;
+	}
+
+	index = page_private(page);
+	entry->swp_entry.val = index;
+	/*
+	 * On numa we need to store the node id to ensure that we prefetch to
+	 * the same node it came from.
+	 */
+	store_swap_entry_node(entry, page);
+
+	if (likely(!radix_tree_insert(&swapped.swap_tree, index, entry))) {
+		/*
+		 * If this is the first entry, kprefetchd needs to be
+		 * (re)started.
+		 */
+		if (!swapped.count)
+			wakeup = 1;
+		list_add(&entry->swapped_list, &swapped.list);
+		swapped.count++;
+	}
+
+out_locked:
+	spin_unlock_irqrestore(&swapped.lock, flags);
+
+	/* Do the wakeup outside the lock to shorten lock hold time. */
+	if (wakeup)
+		wake_up_process(kprefetchd_task);
+
+	return;
+}
+
+/*
+ * Removes entries from the swapped_list. The radix tree allows us to quickly
+ * look up the entry from the index without having to iterate over the whole
+ * list.
+ */
+void remove_from_swapped_list(const unsigned long index)
+{
+	struct swapped_entry *entry;
+	unsigned long flags;
+
+	if (list_empty(&swapped.list))
+		return;
+
+	spin_lock_irqsave(&swapped.lock, flags);
+	entry = radix_tree_delete(&swapped.swap_tree, index);
+	if (likely(entry)) {
+		list_del_init(&entry->swapped_list);
+		swapped.count--;
+		kmem_cache_free(swapped.cache, entry);
+	}
+	spin_unlock_irqrestore(&swapped.lock, flags);
+}
+
+enum trickle_return {
+	TRICKLE_SUCCESS,
+	TRICKLE_FAILED,
+	TRICKLE_DELAY,
+};
+
+struct node_stats {
+	unsigned long last_free;
+	/* Free ram after a cycle of prefetching */
+	unsigned long current_free;
+	/* Free ram on this cycle of checking prefetch_suitable */
+	unsigned long prefetch_watermark;
+	/* Maximum amount we will prefetch to */
+	unsigned long highfree[MAX_NR_ZONES];
+	/* The amount of free ram before we start prefetching */
+	unsigned long lowfree[MAX_NR_ZONES];
+	/* The amount of free ram where we will stop prefetching */
+	unsigned long *pointfree[MAX_NR_ZONES];
+	/* highfree or lowfree depending on whether we've hit a watermark */
+};
+
+/*
+ * prefetch_stats stores the free ram data of each node and this is used to
+ * determine if a node is suitable for prefetching into.
+ */
+struct prefetch_stats {
+	nodemask_t prefetch_nodes;
+	/* Which nodes are currently suited to prefetching */
+	unsigned long prefetched_pages;
+	/* Total pages we've prefetched on this wakeup of kprefetchd */
+	struct node_stats node[MAX_NUMNODES];
+};
+
+static struct prefetch_stats sp_stat;
+
+/*
+ * This tries to read a swp_entry_t into swap cache for swap prefetching.
+ * If it returns TRICKLE_DELAY we should delay further prefetching.
+ */
+static enum trickle_return trickle_swap_cache_async(const swp_entry_t entry,
+	const int node)
+{
+	enum trickle_return ret = TRICKLE_FAILED;
+	struct page *page;
+
+	read_lock_irq(&swapper_space.tree_lock);
+	/* Entry may already exist */
+	page = radix_tree_lookup(&swapper_space.page_tree, entry.val);
+	read_unlock_irq(&swapper_space.tree_lock);
+	if (page) {
+		remove_from_swapped_list(entry.val);
+		goto out;
+	}
+
+	/*
+	 * Get a new page to read from swap. We have already checked the
+	 * watermarks so __alloc_pages will not call on reclaim.
+	 */
+	page = alloc_pages_node(node, GFP_HIGHUSER & ~__GFP_WAIT, 0);
+	if (unlikely(!page)) {
+		ret = TRICKLE_DELAY;
+		goto out;
+	}
+
+	if (add_to_swap_cache(page, entry)) {
+		/* Failed to add to swap cache */
+		goto out_release;
+	}
+
+	/* Add them to the tail of the inactive list to preserve LRU order */
+	lru_cache_add_tail(page);
+	if (unlikely(swap_readpage(NULL, page))) {
+		ret = TRICKLE_DELAY;
+		goto out_release;
+	}
+
+	sp_stat.prefetched_pages++;
+	sp_stat.node[node].last_free--;
+
+	ret = TRICKLE_SUCCESS;
+out_release:
+	page_cache_release(page);
+out:
+	return ret;
+}
+
+static void clear_last_prefetch_free(void)
+{
+	int node;
+
+	/*
+	 * Reset the nodes suitable for prefetching to all nodes. We could
+	 * update the data to take into account memory hotplug if desired..
+	 */
+	sp_stat.prefetch_nodes = node_online_map;
+	for_each_node_mask(node, sp_stat.prefetch_nodes) {
+		struct node_stats *ns = &sp_stat.node[node];
+
+		ns->last_free = 0;
+	}
+}
+
+static void clear_current_prefetch_free(void)
+{
+	int node;
+
+	sp_stat.prefetch_nodes = node_online_map;
+	for_each_node_mask(node, sp_stat.prefetch_nodes) {
+		struct node_stats *ns = &sp_stat.node[node];
+
+		ns->current_free = 0;
+	}
+}
+
+/*
+ * This updates the high and low watermarks of amount of free ram in each
+ * node used to start and stop prefetching. We prefetch from pages_high * 4
+ * down to pages_high * 3.
+ */
+static void examine_free_limits(void)
+{
+	struct zone *z;
+
+	for_each_zone(z) {
+		struct node_stats *ns;
+		int idx;
+
+		if (!populated_zone(z))
+			continue;
+
+		ns = &sp_stat.node[z->zone_pgdat->node_id];
+		idx = zone_idx(z);
+		ns->lowfree[idx] = z->pages_high * 3;
+		ns->highfree[idx] = ns->lowfree[idx] + z->pages_high;
+
+		if (zone_page_state(z, NR_FREE_PAGES) > ns->highfree[idx]) {
+			/*
+			 * We've gotten above the high watermark of free pages
+			 * so we can start prefetching till we get to the low
+			 * watermark.
+			 */
+			ns->pointfree[idx] = &ns->lowfree[idx];
+		}
+	}
+}
+
+/*
+ * We want to be absolutely certain it's ok to start prefetching.
+ */
+static int prefetch_suitable(void)
+{
+	unsigned long limit;
+	struct zone *z;
+	int node, ret = 0, test_pagestate = 0;
+
+	/* Purposefully racy */
+	if (test_bit(0, &swapped.busy)) {
+		__clear_bit(0, &swapped.busy);
+		goto out;
+	}
+
+	/*
+	 * get_page_state and above_background_load are expensive so we only
+	 * perform them every SWAP_CLUSTER_MAX prefetched_pages.
+	 * We test to see if we're above_background_load as disk activity
+	 * even at low priority can cause interrupt induced scheduling
+	 * latencies.
+	 */
+	if (!(sp_stat.prefetched_pages % SWAP_CLUSTER_MAX)) {
+		if (above_background_load())
+			goto out;
+		test_pagestate = 1;
+	}
+
+	clear_current_prefetch_free();
+
+	/*
+	 * Have some hysteresis between where page reclaiming and prefetching
+	 * will occur to prevent ping-ponging between them.
+	 */
+	for_each_zone(z) {
+		struct node_stats *ns;
+		unsigned long free;
+		int idx;
+
+		if (!populated_zone(z))
+			continue;
+
+		node = z->zone_pgdat->node_id;
+		ns = &sp_stat.node[node];
+		idx = zone_idx(z);
+
+		free = zone_page_state(z, NR_FREE_PAGES);
+		if (free < *ns->pointfree[idx]) {
+			/*
+			 * Free pages have dropped below the low watermark so
+			 * we won't start prefetching again till we hit the
+			 * high watermark of free pages.
+			 */
+			ns->pointfree[idx] = &ns->highfree[idx];
+			node_clear(node, sp_stat.prefetch_nodes);
+			continue;
+		}
+		ns->current_free += free;
+	}
+
+	/*
+	 * We iterate over each node testing to see if it is suitable for
+	 * prefetching and clear the nodemask if it is not.
+	 */
+	for_each_node_mask(node, sp_stat.prefetch_nodes) {
+		struct node_stats *ns = &sp_stat.node[node];
+
+		/*
+		 * We check to see that pages are not being allocated
+		 * elsewhere at any significant rate implying any
+		 * degree of memory pressure (eg during file reads)
+		 */
+		if (ns->last_free) {
+			if (ns->current_free + SWAP_CLUSTER_MAX <
+					ns->last_free) {
+				ns->last_free = ns->current_free;
+				node_clear(node,
+					sp_stat.prefetch_nodes);
+				continue;
+			}
+		} else
+			ns->last_free = ns->current_free;
+
+		if (!test_pagestate)
+			continue;
+
+		/* We shouldn't prefetch when we are doing writeback */
+		if (node_page_state(node, NR_WRITEBACK)) {
+			node_clear(node, sp_stat.prefetch_nodes);
+			continue;
+		}
+
+		/*
+		 * >2/3 of the ram on this node is mapped, slab, swapcache or
+		 * dirty, we need to leave some free for pagecache.
+		 */
+		limit = node_page_state(node, NR_FILE_PAGES);
+		limit += node_page_state(node, NR_SLAB_RECLAIMABLE);
+		limit += node_page_state(node, NR_SLAB_UNRECLAIMABLE);
+		limit += node_page_state(node, NR_FILE_DIRTY);
+		limit += node_page_state(node, NR_UNSTABLE_NFS);
+		limit += total_swapcache_pages;
+		if (limit > ns->prefetch_watermark) {
+			node_clear(node, sp_stat.prefetch_nodes);
+			continue;
+		}
+	}
+
+	if (nodes_empty(sp_stat.prefetch_nodes))
+		goto out;
+
+	/* Survived all that? Hooray we can prefetch! */
+	ret = 1;
+out:
+	return ret;
+}
+
+/*
+ * Get previous swapped entry when iterating over all entries. swapped.lock
+ * should be held and we should already ensure that entry exists.
+ */
+static inline struct swapped_entry *prev_swapped_entry
+	(struct swapped_entry *entry)
+{
+	return list_entry(entry->swapped_list.prev->prev,
+		struct swapped_entry, swapped_list);
+}
+
+/*
+ * trickle_swap is the main function that initiates the swap prefetching. It
+ * first checks to see if the busy flag is set, and does not prefetch if it
+ * is, as the flag implies we are low on memory or swapping in currently.
+ * Otherwise it runs until prefetch_suitable fails, which occurs when the
+ * vm is busy, we have prefetched to the watermark, the list is empty, or
+ * we have iterated over all entries.
+ */
+static enum trickle_return trickle_swap(void)
+{
+	enum trickle_return ret = TRICKLE_DELAY;
+	struct swapped_entry *entry;
+	unsigned long flags;
+
+	/*
+	 * If laptop_mode is enabled don't prefetch to avoid hard drives
+	 * doing unnecessary spin-ups
+	 */
+	if (!swap_prefetch || laptop_mode)
+		return ret;
+
+	examine_free_limits();
+	entry = NULL;
+
+	for ( ; ; ) {
+		swp_entry_t swp_entry;
+		int node;
+
+		if (!prefetch_suitable())
+			break;
+
+		spin_lock_irqsave(&swapped.lock, flags);
+		if (list_empty(&swapped.list)) {
+			ret = TRICKLE_FAILED;
+			spin_unlock_irqrestore(&swapped.lock, flags);
+			break;
+		}
+
+		if (!entry) {
+			/*
+			 * This sets the entry for the first iteration. It
+			 * also is a safeguard against the entry disappearing
+			 * while the lock is not held.
+			 */
+			entry = list_entry(swapped.list.prev,
+				struct swapped_entry, swapped_list);
+		} else if (entry->swapped_list.prev == swapped.list.next) {
+			/*
+			 * If we have iterated over all entries and entries
+			 * still remain that were not prefetched, there may
+			 * be a reason we could not swap them back in, so
+			 * delay attempting further prefetching.
+			 */
+			spin_unlock_irqrestore(&swapped.lock, flags);
+			break;
+		}
+
+		node = get_swap_entry_node(entry);
+		if (!node_isset(node, sp_stat.prefetch_nodes)) {
+			/*
+			 * We found an entry that belongs to a node that is
+			 * not suitable for prefetching so skip it.
+			 */
+			entry = prev_swapped_entry(entry);
+			spin_unlock_irqrestore(&swapped.lock, flags);
+			continue;
+		}
+		swp_entry = entry->swp_entry;
+		entry = prev_swapped_entry(entry);
+		spin_unlock_irqrestore(&swapped.lock, flags);
+
+		if (trickle_swap_cache_async(swp_entry, node) == TRICKLE_DELAY)
+			break;
+	}
+
+	if (sp_stat.prefetched_pages) {
+		lru_add_drain();
+		sp_stat.prefetched_pages = 0;
+	}
+	return ret;
+}
+
+static int kprefetchd(void *__unused)
+{
+	struct sched_param param = { .sched_priority = 0 };
+
+	sched_setscheduler(current, SCHED_BATCH, &param);
+	set_user_nice(current, 19);
+	/* Set ioprio to lowest if supported by i/o scheduler */
+	sys_ioprio_set(IOPRIO_WHO_PROCESS, 0, IOPRIO_CLASS_IDLE);
+
+	/* kprefetchd has nothing to do until it is woken up the first time */
+	set_current_state(TASK_INTERRUPTIBLE);
+	schedule();
+
+	do {
+		try_to_freeze();
+
+		/*
+		 * TRICKLE_FAILED implies no entries left - we do not schedule
+		 * a wakeup, and further delay the next one.
+		 */
+		if (trickle_swap() == TRICKLE_FAILED) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule();
+		}
+		clear_last_prefetch_free();
+		schedule_timeout_interruptible(PREFETCH_DELAY);
+	} while (!kthread_should_stop());
+
+	return 0;
+}
+
+/*
+ * Create kmem cache for swapped entries
+ */
+void __init prepare_swap_prefetch(void)
+{
+	struct zone *zone;
+
+	swapped.cache = kmem_cache_create("swapped_entry",
+		sizeof(struct swapped_entry), 0, SLAB_PANIC, NULL, NULL);
+
+	/*
+	 * Set max number of entries to 2/3 the size of physical ram as we
+	 * only ever prefetch to consume 2/3 of the ram.
+	 */
+	swapped.maxcount = nr_free_pagecache_pages() / 3 * 2;
+
+	for_each_zone(zone) {
+		unsigned long present;
+		struct node_stats *ns;
+		int idx;
+
+		present = zone->present_pages;
+		if (!present)
+			continue;
+
+		ns = &sp_stat.node[zone->zone_pgdat->node_id];
+		ns->prefetch_watermark += present / 3 * 2;
+		idx = zone_idx(zone);
+		ns->pointfree[idx] = &ns->highfree[idx];
+	}
+}
+
+static int __init kprefetchd_init(void)
+{
+	kprefetchd_task = kthread_run(kprefetchd, NULL, "kprefetchd");
+
+	return 0;
+}
+
+static void __exit kprefetchd_exit(void)
+{
+	kthread_stop(kprefetchd_task);
+}
+
+module_init(kprefetchd_init);
+module_exit(kprefetchd_exit);
Index: linux-2.6.21-ck2/mm/swap_state.c
===================================================================
--- linux-2.6.21-ck2.orig/mm/swap_state.c	2007-05-14 19:49:19.000000000 +1000
+++ linux-2.6.21-ck2/mm/swap_state.c	2007-05-14 19:49:55.000000000 +1000
@@ -10,6 +10,7 @@
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>
 #include <linux/swap.h>
+#include <linux/swap-prefetch.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
@@ -82,6 +83,7 @@ static int __add_to_swap_cache(struct pa
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (!error) {
+			remove_from_swapped_list(entry.val);
			page_cache_get(page);
			SetPageLocked(page);
			SetPageSwapCache(page);
@@ -95,11 +97,12 @@ static int __add_to_swap_cache(struct pa
	return error;
 }
 
-static int add_to_swap_cache(struct page *page, swp_entry_t entry)
+int add_to_swap_cache(struct page *page, swp_entry_t entry)
 {
	int error;
 
	if (!swap_duplicate(entry)) {
+		remove_from_swapped_list(entry.val);
		INC_CACHE_INFO(noent_race);
		return -ENOENT;
	}
@@ -148,6 +151,9 @@ int add_to_swap(struct page * page, gfp_
	swp_entry_t entry;
	int err;
 
+	/* Swap prefetching is delayed if we're swapping pages */
+	delay_swap_prefetch();
+
	BUG_ON(!PageLocked(page));
 
	for (;;) {
@@ -320,6 +326,9 @@ struct page *read_swap_cache_async(swp_e
	struct page *found_page, *new_page = NULL;
	int err;
 
+	/* Swap prefetching is delayed if we're already reading from swap */
+	delay_swap_prefetch();
+
	do {
		/*
		 * First check the swap cache. Since this is normally
Index: linux-2.6.21-ck2/mm/vmscan.c
===================================================================
--- linux-2.6.21-ck2.orig/mm/vmscan.c	2007-05-14 19:49:19.000000000 +1000
+++ linux-2.6.21-ck2/mm/vmscan.c	2007-05-14 19:49:55.000000000 +1000
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/kernel_stat.h>
 #include <linux/swap.h>
+#include <linux/swap-prefetch.h>
 #include <linux/pagemap.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
@@ -424,6 +425,7 @@ int remove_mapping(struct address_space
 
	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
+		add_to_swapped_list(page);
		__delete_from_swap_cache(page);
		write_unlock_irq(&mapping->tree_lock);
		swap_free(swap);
@@ -1032,6 +1034,8 @@ unsigned long try_to_free_pages(struct z
		.swappiness = vm_swappiness,
	};
 
+	delay_swap_prefetch();
+
	count_vm_event(ALLOCSTALL);
 
	for (i = 0; zones[i] != NULL; i++) {
@@ -1381,6 +1385,8 @@ static unsigned long shrink_all_zones(un
	struct zone *zone;
	unsigned long nr_to_scan, ret = 0;
 
+	delay_swap_prefetch();
+
	for_each_zone(zone) {
 
		if (!populated_zone(zone))
Index: linux-2.6.21-ck2/include/linux/mm_inline.h
===================================================================
--- linux-2.6.21-ck2.orig/include/linux/mm_inline.h	2007-05-14 19:49:19.000000000 +1000
+++ linux-2.6.21-ck2/include/linux/mm_inline.h	2007-05-14 19:49:55.000000000 +1000
@@ -13,6 +13,13 @@ add_page_to_inactive_list(struct zone *z
 }
 
 static inline void
+add_page_to_inactive_list_tail(struct zone *zone, struct page *page)
+{
+	list_add_tail(&page->lru, &zone->inactive_list);
+	__inc_zone_state(zone, NR_INACTIVE);
+}
+
+static inline void
 del_page_from_active_list(struct zone *zone, struct page *page)
 {
	list_del(&page->lru);
Index: linux-2.6.21-ck2/include/linux/swap-prefetch.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.21-ck2/include/linux/swap-prefetch.h	2007-05-14 19:49:55.000000000 +1000
@@ -0,0 +1,55 @@
+#ifndef SWAP_PREFETCH_H_INCLUDED
+#define SWAP_PREFETCH_H_INCLUDED
+
+#ifdef CONFIG_SWAP_PREFETCH
+/* mm/swap_prefetch.c */
+extern int swap_prefetch;
+struct swapped_entry {
+	swp_entry_t swp_entry;		/* The actual swap entry */
+	struct list_head swapped_list;	/* Linked list of entries */
+#if MAX_NUMNODES > 1
+	int node;			/* Node id */
+#endif
+} __attribute__((packed));
+
+static inline void store_swap_entry_node(struct swapped_entry *entry,
+	struct page *page)
+{
+#if MAX_NUMNODES > 1
+	entry->node = page_to_nid(page);
+#endif
+}
+
+static inline int get_swap_entry_node(struct swapped_entry *entry)
+{
+#if MAX_NUMNODES > 1
+	return entry->node;
+#else
+	return 0;
+#endif
+}
+
+extern void add_to_swapped_list(struct page *page);
+extern void remove_from_swapped_list(const unsigned long index);
+extern void delay_swap_prefetch(void);
+extern void prepare_swap_prefetch(void);
+
+#else	/* CONFIG_SWAP_PREFETCH */
+static inline void add_to_swapped_list(struct page *__unused)
+{
+}
+
+static inline void prepare_swap_prefetch(void)
+{
+}
+
+static inline void remove_from_swapped_list(const unsigned long __unused)
+{
+}
+
+static inline void delay_swap_prefetch(void)
+{
+}
+#endif	/* CONFIG_SWAP_PREFETCH */
+
+#endif	/* SWAP_PREFETCH_H_INCLUDED */