Magellan Linux

/trunk/kernel26-magellan/patches-2.6.16-r10/0026-2.6.16-sp-resume1.patch

Revision 70
Thu May 11 19:09:22 2006 UTC by niro
File size: 6222 byte(s)
import

---
 Documentation/sysctl/vm.txt | 9 +++
 mm/swap_prefetch.c | 119 +++++++++++++++++++++++++++++---------------
 2 files changed, 90 insertions(+), 38 deletions(-)

Index: linux-2.6.16-ck1/mm/swap_prefetch.c
===================================================================
--- linux-2.6.16-ck1.orig/mm/swap_prefetch.c 2006-03-20 20:46:55.000000000 +1100
+++ linux-2.6.16-ck1/mm/swap_prefetch.c 2006-03-20 20:47:00.000000000 +1100
@@ -27,8 +27,18 @@
  */
 #define PREFETCH_DELAY (HZ * 5)

-/* sysctl - enable/disable swap prefetching */
-int swap_prefetch __read_mostly = 1;
+#define PREFETCH_NORMAL (1 << 0)
+#define PREFETCH_AGGRESSIVE (1 << 1)
+/*
+ * sysctl - enable/disable swap prefetching bits
+ * This is composed of the bitflags PREFETCH_NORMAL and PREFETCH_AGGRESSIVE.
+ * Once PREFETCH_AGGRESSIVE is set, swap prefetching will be performed as much
+ * as possible irrespective of load conditions and then the
+ * PREFETCH_AGGRESSIVE bit will be unset.
+ */
+int swap_prefetch __read_mostly = PREFETCH_NORMAL;
+
+#define aggressive_prefetch (unlikely(swap_prefetch & PREFETCH_AGGRESSIVE))

 struct swapped_root {
 unsigned long busy; /* vm busy */
@@ -291,43 +301,17 @@ static void examine_free_limits(void)
 }

 /*
- * We want to be absolutely certain it's ok to start prefetching.
+ * Have some hysteresis between where page reclaiming and prefetching
+ * will occur to prevent ping-ponging between them.
 */
-static int prefetch_suitable(void)
+static void set_suitable_nodes(void)
 {
- unsigned long limit;
 struct zone *z;
- int node, ret = 0, test_pagestate = 0;
-
- /* Purposefully racy */
- if (test_bit(0, &swapped.busy)) {
- __clear_bit(0, &swapped.busy);
- goto out;
- }
-
- /*
- * get_page_state and above_background_load are expensive so we only
- * perform them every SWAP_CLUSTER_MAX prefetched_pages.
- * We test to see if we're above_background_load as disk activity
- * even at low priority can cause interrupt induced scheduling
- * latencies.
- */
- if (!(sp_stat.prefetched_pages % SWAP_CLUSTER_MAX)) {
- if (above_background_load())
- goto out;
- test_pagestate = 1;
- }

- clear_current_prefetch_free();
-
- /*
- * Have some hysteresis between where page reclaiming and prefetching
- * will occur to prevent ping-ponging between them.
- */
 for_each_zone(z) {
 struct node_stats *ns;
 unsigned long free;
- int idx;
+ int node, idx;

 if (!populated_zone(z))
 continue;
@@ -349,6 +333,45 @@ static int prefetch_suitable(void)
 }
 ns->current_free += free;
 }
+}
+
+/*
+ * We want to be absolutely certain it's ok to start prefetching.
+ */
+static int prefetch_suitable(void)
+{
+ unsigned long limit;
+ int node, ret = 0, test_pagestate = 0;
+
+ if (aggressive_prefetch) {
+ clear_current_prefetch_free();
+ set_suitable_nodes();
+ if (!nodes_empty(sp_stat.prefetch_nodes))
+ ret = 1;
+ goto out;
+ }
+
+ /* Purposefully racy */
+ if (test_bit(0, &swapped.busy)) {
+ __clear_bit(0, &swapped.busy);
+ goto out;
+ }
+
+ /*
+ * get_page_state and above_background_load are expensive so we only
+ * perform them every SWAP_CLUSTER_MAX prefetched_pages.
+ * We test to see if we're above_background_load as disk activity
+ * even at low priority can cause interrupt induced scheduling
+ * latencies.
+ */
+ if (!(sp_stat.prefetched_pages % SWAP_CLUSTER_MAX)) {
+ if (above_background_load())
+ goto out;
+ test_pagestate = 1;
+ }
+
+ clear_current_prefetch_free();
+ set_suitable_nodes();

 /*
 * We iterate over each node testing to see if it is suitable for
@@ -421,6 +444,17 @@ static inline struct swapped_entry *prev
 struct swapped_entry, swapped_list);
 }

+static unsigned long pages_prefetched(void)
+{
+ unsigned long pages = sp_stat.prefetched_pages;
+
+ if (pages) {
+ lru_add_drain();
+ sp_stat.prefetched_pages = 0;
+ }
+ return pages;
+}
+
 /*
 * trickle_swap is the main function that initiates the swap prefetching. It
 * first checks to see if the busy flag is set, and does not prefetch if it
@@ -438,7 +472,7 @@ static enum trickle_return trickle_swap(
 * If laptop_mode is enabled don't prefetch to avoid hard drives
 * doing unnecessary spin-ups
 */
- if (!swap_prefetch || laptop_mode)
+ if (!swap_prefetch || (laptop_mode && !aggressive_prefetch))
 return ret;

 examine_free_limits();
@@ -474,6 +508,14 @@ static enum trickle_return trickle_swap(
 * delay attempting further prefetching.
 */
 spin_unlock(&swapped.lock);
+ if (aggressive_prefetch) {
+ /*
+ * If we're prefetching aggressively and
+ * making progress then don't give up.
+ */
+ if (pages_prefetched())
+ continue;
+ }
 break;
 }

@@ -491,14 +533,15 @@ static enum trickle_return trickle_swap(
 entry = prev_swapped_entry(entry);
 spin_unlock(&swapped.lock);

- if (trickle_swap_cache_async(swp_entry, node) == TRICKLE_DELAY)
+ if (trickle_swap_cache_async(swp_entry, node) == TRICKLE_DELAY &&
+ !aggressive_prefetch)
 break;
 }

- if (sp_stat.prefetched_pages) {
- lru_add_drain();
- sp_stat.prefetched_pages = 0;
- }
+ /* Return value of pages_prefetched irrelevant here */
+ pages_prefetched();
+ if (aggressive_prefetch)
+ swap_prefetch &= ~PREFETCH_AGGRESSIVE;
 return ret;
 }

Index: linux-2.6.16-ck1/Documentation/sysctl/vm.txt
===================================================================
--- linux-2.6.16-ck1.orig/Documentation/sysctl/vm.txt 2006-03-20 20:46:55.000000000 +1100
+++ linux-2.6.16-ck1/Documentation/sysctl/vm.txt 2006-03-20 20:47:00.000000000 +1100
@@ -188,4 +188,13 @@ memory subsystem has been extremely idle
 copying back pages from swap into the swapcache and keep a copy in swap. In
 practice it can take many minutes before the vm is idle enough.

+This value is an OR of the following bits:
+1 = Normal background swap prefetching when load is light
+2 = Aggressively swap prefetch as much as possible
+
+When 2 is set, this bit is cleared again once the maximum amount possible has
+been prefetched, i.e. setting the value to 3 will prefetch aggressively and then
+drop back to 1. This is useful for short bursts of aggressive prefetching in
+scripts, such as after resuming from software suspend.
+
 The default value is 1.
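
As an aside, the aggressive-prefetch bit is intended to be poked from userspace
right after resume. A minimal sketch of such a resume hook is shown below; the
helper itself is hypothetical and not part of this patch. It simply writes the
value 3 (PREFETCH_NORMAL | PREFETCH_AGGRESSIVE) to /proc/sys/vm/swap_prefetch,
the same effect as "echo 3 > /proc/sys/vm/swap_prefetch":

/*
 * Hypothetical userspace helper, not part of this patch: request one
 * round of aggressive swap prefetching, e.g. from a post-resume script.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/vm/swap_prefetch", "w");

        if (!f) {
                perror("/proc/sys/vm/swap_prefetch");
                return 1;
        }
        /*
         * 3 = PREFETCH_NORMAL | PREFETCH_AGGRESSIVE; per the vm.txt hunk
         * above, the kernel clears the aggressive bit on its own once it
         * has prefetched as much as it can.
         */
        fputs("3\n", f);
        fclose(f);
        return 0;
}

There is no need to reset the value afterwards; once the aggressive pass
finishes, reading the sysctl shows 1 again.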