Magellan Linux

/trunk/kernel26-magellan/patches-2.6.21-r2/0015-2.6.21-mm-prio_dependant_scan-2.patch

Revision 200
Sat May 19 14:08:35 2007 UTC by niro
File size: 6529 byte(s)
-rev bump to 2.6.21-magellan-r2; disabled acpi-dsdt patch, broken -> "runaway loop request module binfmt-0000"

Set the initial "priority" of memory reclaim scanning according to the CPU
scheduling priority, thus determining how aggressively reclaim initially
progresses according to nice level.

Signed-off-by: Con Kolivas <kernel@kolivas.org>

 fs/buffer.c          |    2 +-
 include/linux/swap.h |    3 ++-
 mm/page_alloc.c      |    2 +-
 mm/vmscan.c          |   37 ++++++++++++++++++++++++-------------
 4 files changed, 28 insertions(+), 16 deletions(-)

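The mm/vmscan.c hunks below add a small helper, sc_priority(), that maps a task's
scheduling priority onto the starting reclaim scan priority. As a rough standalone
sketch of the resulting range (not part of the patch), the program below assumes
effective_sc_prio() can be approximated by the task's plain nice value (-20..19);
effective_sc_prio() itself is provided by the accompanying -ck scheduler patches,
and DEF_PRIORITY is 12 in 2.6.21.

/*
 * Illustrative sketch only, not part of the patch: prints the starting
 * scan priorities that the sc_priority() formula produces when
 * effective_sc_prio() is approximated by a plain nice value.
 */
#include <stdio.h>

#define DEF_PRIORITY 12         /* value used by 2.6.21 reclaim code */

static int sc_priority_sketch(int nice)
{
        /* same arithmetic as sc_priority() in the patch */
        return DEF_PRIORITY + (DEF_PRIORITY * nice / 40);
}

int main(void)
{
        int nice;

        for (nice = -20; nice <= 19; nice++)
                printf("nice %3d -> initial scan priority %d\n",
                       nice, sc_priority_sketch(nice));
        /* nice -20 -> 6, nice 0 -> 12 (stock DEF_PRIORITY), nice 19 -> 17 */
        return 0;
}

Since the reclaim loops count priority down towards 0 and scan a larger fraction
of the LRU as the value drops, a higher starting value means positively niced
tasks begin with smaller scan batches, while negatively niced tasks start
reclaiming more aggressively from the first pass.
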
Index: linux-2.6.21-ck2/fs/buffer.c
===================================================================
--- linux-2.6.21-ck2.orig/fs/buffer.c 2007-05-14 19:49:18.000000000 +1000
+++ linux-2.6.21-ck2/fs/buffer.c 2007-05-14 19:49:56.000000000 +1000
@@ -363,7 +363,7 @@ static void free_more_memory(void)
         for_each_online_pgdat(pgdat) {
                 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
                 if (*zones)
-                        try_to_free_pages(zones, GFP_NOFS);
+                        try_to_free_pages(zones, GFP_NOFS, NULL);
         }
 }
 
Index: linux-2.6.21-ck2/include/linux/swap.h
===================================================================
--- linux-2.6.21-ck2.orig/include/linux/swap.h 2007-05-14 19:49:55.000000000 +1000
+++ linux-2.6.21-ck2/include/linux/swap.h 2007-05-14 19:49:56.000000000 +1000
@@ -189,7 +189,8 @@ extern int rotate_reclaimable_page(struc
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **, gfp_t);
+extern unsigned long try_to_free_pages(struct zone **, gfp_t,
+                        struct task_struct *p);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_mapped;
 extern int vm_hardmaplimit;
Index: linux-2.6.21-ck2/mm/page_alloc.c
===================================================================
--- linux-2.6.21-ck2.orig/mm/page_alloc.c 2007-05-14 19:49:55.000000000 +1000
+++ linux-2.6.21-ck2/mm/page_alloc.c 2007-05-14 19:49:56.000000000 +1000
@@ -1341,7 +1341,7 @@ nofail_alloc:
         reclaim_state.reclaimed_slab = 0;
         p->reclaim_state = &reclaim_state;
 
-        did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);
+        did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask, p);
 
         p->reclaim_state = NULL;
         p->flags &= ~PF_MEMALLOC;
Index: linux-2.6.21-ck2/mm/vmscan.c
===================================================================
--- linux-2.6.21-ck2.orig/mm/vmscan.c 2007-05-14 19:49:55.000000000 +1000
+++ linux-2.6.21-ck2/mm/vmscan.c 2007-05-14 19:49:56.000000000 +1000
@@ -993,6 +993,11 @@ static void set_kswapd_nice(struct task_
         set_user_nice(kswapd, nice);
 }
 
+static int sc_priority(struct task_struct *p)
+{
+        return (DEF_PRIORITY + (DEF_PRIORITY * effective_sc_prio(p) / 40));
+}
+
 /*
  * This is the direct reclaim path, for page-allocating processes. We only
  * try to reclaim pages from zones which will satisfy the caller's allocation
@@ -1050,7 +1055,8 @@ static unsigned long shrink_zones(int pr
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
+                        struct task_struct *p)
 {
         int priority;
         int ret = 0;
@@ -1058,7 +1064,7 @@ unsigned long try_to_free_pages(struct z
         unsigned long nr_reclaimed = 0;
         struct reclaim_state *reclaim_state = current->reclaim_state;
         unsigned long lru_pages = 0;
-        int i;
+        int i, scan_priority = DEF_PRIORITY;
         struct scan_control sc = {
                 .gfp_mask = gfp_mask,
                 .may_writepage = !laptop_mode,
@@ -1067,6 +1073,9 @@ unsigned long try_to_free_pages(struct z
                 .mapped = vm_mapped,
         };
 
+        if (p)
+                scan_priority = sc_priority(p);
+
         delay_swap_prefetch();
 
         count_vm_event(ALLOCSTALL);
@@ -1081,7 +1090,7 @@ unsigned long try_to_free_pages(struct z
                         + zone_page_state(zone, NR_INACTIVE);
         }
 
-        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
+        for (priority = scan_priority; priority >= 0; priority--) {
                 sc.nr_scanned = 0;
                 if (!priority)
                         disable_swap_token();
@@ -1111,7 +1120,7 @@ unsigned long try_to_free_pages(struct z
                 }
 
                 /* Take a nap, wait for some writeback to complete */
-                if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
+                if (sc.nr_scanned && priority < scan_priority - 2)
                         congestion_wait(WRITE, HZ/10);
         }
         /* top priority shrink_caches still had more to do? don't OOM, then */
@@ -1161,9 +1170,9 @@ out:
  */
 static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 {
-        int all_zones_ok;
+        int all_zones_ok = 0;
         int priority;
-        int i;
+        int i, scan_priority;
         unsigned long total_scanned;
         unsigned long nr_reclaimed;
         struct reclaim_state *reclaim_state = current->reclaim_state;
@@ -1179,6 +1188,8 @@ static unsigned long balance_pgdat(pg_da
          */
         int temp_priority[MAX_NR_ZONES];
 
+        scan_priority = sc_priority(pgdat->kswapd);
+
 loop_again:
         total_scanned = 0;
         nr_reclaimed = 0;
@@ -1186,9 +1197,9 @@ loop_again:
         count_vm_event(PAGEOUTRUN);
 
         for (i = 0; i < pgdat->nr_zones; i++)
-                temp_priority[i] = DEF_PRIORITY;
+                temp_priority[i] = scan_priority;
 
-        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
+        for (priority = scan_priority; priority >= 0; priority--) {
                 int end_zone = 0;       /* Inclusive. 0 = ZONE_DMA */
                 unsigned long lru_pages = 0;
 
@@ -1209,7 +1220,7 @@ loop_again:
                         if (!populated_zone(zone))
                                 continue;
 
-                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                        if (zone->all_unreclaimable && priority != scan_priority)
                                 continue;
 
                         /*
@@ -1218,7 +1229,7 @@ loop_again:
                          * pages_high.
                          */
                         watermark = zone->pages_high + (zone->pages_high *
-                                priority / DEF_PRIORITY);
+                                priority / scan_priority);
                         if (!zone_watermark_ok(zone, order, watermark, 0, 0)) {
                                 end_zone = i;
                                 break;
@@ -1251,11 +1262,11 @@ loop_again:
                         if (!populated_zone(zone))
                                 continue;
 
-                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                        if (zone->all_unreclaimable && priority != scan_priority)
                                 continue;
 
                         watermark = zone->pages_high + (zone->pages_high *
-                                priority / DEF_PRIORITY);
+                                priority / scan_priority);
 
                         if (!zone_watermark_ok(zone, order, watermark,
                                                end_zone, 0))
@@ -1290,7 +1301,7 @@ loop_again:
                  * OK, kswapd is getting into trouble. Take a nap, then take
                  * another pass across the zones.
                  */
-                if (total_scanned && priority < DEF_PRIORITY - 2)
+                if (total_scanned && priority < scan_priority - 2)
                         congestion_wait(WRITE, HZ/10);
 
                 /*