Magellan Linux

Contents of /trunk/kernel26-magellan/patches-2.6.16-r12/0032-2.6.16-mm-swap_prefetch_fix.patch

Revision 72
Mon Jun 5 09:25:38 2006 UTC by niro
File size: 2514 bytes
Version bump to 2.6.16-r12:
- updated to linux-2.6.16.19
- updated to ck11

---
 mm/swap_prefetch.c | 17 +++++++++--------
 1 files changed, 9 insertions(+), 8 deletions(-)

Index: linux-2.6.16-ck10/mm/swap_prefetch.c
===================================================================
--- linux-2.6.16-ck10.orig/mm/swap_prefetch.c	2006-05-08 21:02:12.000000000 +1000
+++ linux-2.6.16-ck10/mm/swap_prefetch.c	2006-05-08 21:03:15.000000000 +1000
@@ -76,7 +76,7 @@ inline void delay_swap_prefetch(void)
 void add_to_swapped_list(struct page *page)
 {
 	struct swapped_entry *entry;
-	unsigned long index;
+	unsigned long index, flags;
 	int wakeup;
 
 	if (!swap_prefetch)
@@ -84,7 +84,7 @@ void add_to_swapped_list(struct page *pa
 
 	wakeup = 0;
 
-	spin_lock(&swapped.lock);
+	spin_lock_irqsave(&swapped.lock, flags);
 	if (swapped.count >= swapped.maxcount) {
 		/*
 		 * We limit the number of entries to 2/3 of physical ram.
@@ -123,7 +123,7 @@ void add_to_swapped_list(struct page *pa
 	}
 
 out_locked:
-	spin_unlock(&swapped.lock);
+	spin_unlock_irqrestore(&swapped.lock, flags);
 
 	/* Do the wakeup outside the lock to shorten lock hold time. */
 	if (wakeup)
@@ -468,6 +468,7 @@ static enum trickle_return trickle_swap(
 {
 	enum trickle_return ret = TRICKLE_DELAY;
 	struct swapped_entry *entry;
+	unsigned long flags;
 
 	/*
 	 * If laptop_mode is enabled don't prefetch to avoid hard drives
@@ -486,10 +487,10 @@ static enum trickle_return trickle_swap(
 		if (!prefetch_suitable())
 			break;
 
-		spin_lock(&swapped.lock);
+		spin_lock_irqsave(&swapped.lock, flags);
 		if (list_empty(&swapped.list)) {
 			ret = TRICKLE_FAILED;
-			spin_unlock(&swapped.lock);
+			spin_unlock_irqrestore(&swapped.lock, flags);
 			break;
 		}
 
@@ -508,7 +509,7 @@ static enum trickle_return trickle_swap(
 			 * be a reason we could not swap them back in so
 			 * delay attempting further prefetching.
 			 */
-			spin_unlock(&swapped.lock);
+			spin_unlock_irqrestore(&swapped.lock, flags);
 			if (aggressive_prefetch) {
 				/*
 				 * If we're prefetching aggressively and
@@ -527,12 +528,12 @@ static enum trickle_return trickle_swap(
 			 * not suitable for prefetching so skip it.
 			 */
 			entry = prev_swapped_entry(entry);
-			spin_unlock(&swapped.lock);
+			spin_unlock_irqrestore(&swapped.lock, flags);
 			continue;
 		}
 		swp_entry = entry->swp_entry;
		entry = prev_swapped_entry(entry);
-		spin_unlock(&swapped.lock);
+		spin_unlock_irqrestore(&swapped.lock, flags);
 
 		if (trickle_swap_cache_async(swp_entry, node) == TRICKLE_DELAY &&
 		    !aggressive_prefetch)
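
The hunks above convert the plain spin_lock()/spin_unlock() calls on swapped.lock to their IRQ-saving variants, which save the local interrupt state in a flags word, keep interrupts disabled while the lock is held, and restore the saved state on unlock. Below is a minimal, self-contained sketch of that pattern for reference; the lock and function names are hypothetical and not taken from the patch.

/*
 * Sketch of the spin_lock_irqsave()/spin_unlock_irqrestore() pattern.
 * example_lock, example_count and example_add() are illustrative names.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */
static unsigned long example_count;	/* data protected by example_lock */

static void example_add(void)
{
	unsigned long flags;

	/* Save current IRQ state in flags, disable IRQs, take the lock. */
	spin_lock_irqsave(&example_lock, flags);
	example_count++;		/* critical section */
	/* Release the lock and restore the saved IRQ state. */
	spin_unlock_irqrestore(&example_lock, flags);
}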