---
 mm/swap_prefetch.c |   17 +++++++++--------
 1 files changed, 9 insertions(+), 8 deletions(-)

Index: linux-2.6.16-ck10/mm/swap_prefetch.c
===================================================================
--- linux-2.6.16-ck10.orig/mm/swap_prefetch.c	2006-05-08 21:02:12.000000000 +1000
+++ linux-2.6.16-ck10/mm/swap_prefetch.c	2006-05-08 21:03:15.000000000 +1000
@@ -76,7 +76,7 @@ inline void delay_swap_prefetch(void)
 void add_to_swapped_list(struct page *page)
 {
 	struct swapped_entry *entry;
-	unsigned long index;
+	unsigned long index, flags;
 	int wakeup;
 
 	if (!swap_prefetch)
@@ -84,7 +84,7 @@ void add_to_swapped_list(struct pa
 
 	wakeup = 0;
 
-	spin_lock(&swapped.lock);
+	spin_lock_irqsave(&swapped.lock, flags);
 	if (swapped.count >= swapped.maxcount) {
 		/*
 		 * We limit the number of entries to 2/3 of physical ram.
@@ -123,7 +123,7 @@ void add_to_swapped_list(struct pa
 	}
 
 out_locked:
-	spin_unlock(&swapped.lock);
+	spin_unlock_irqrestore(&swapped.lock, flags);
 
 	/* Do the wakeup outside the lock to shorten lock hold time. */
 	if (wakeup)
@@ -468,6 +468,7 @@ static enum trickle_return trickle_swap(
 {
 	enum trickle_return ret = TRICKLE_DELAY;
 	struct swapped_entry *entry;
+	unsigned long flags;
 
 	/*
 	 * If laptop_mode is enabled don't prefetch to avoid hard drives
@@ -486,10 +487,10 @@ static enum trickle_return trickle_swap(
 		if (!prefetch_suitable())
 			break;
 
-		spin_lock(&swapped.lock);
+		spin_lock_irqsave(&swapped.lock, flags);
 		if (list_empty(&swapped.list)) {
 			ret = TRICKLE_FAILED;
-			spin_unlock(&swapped.lock);
+			spin_unlock_irqrestore(&swapped.lock, flags);
 			break;
 		}
 
@@ -508,7 +509,7 @@ static enum trickle_return trickle_swap(
 			 * be a reason we could not swap them back in so
 			 * delay attempting further prefetching.
			 */
-			spin_unlock(&swapped.lock);
+			spin_unlock_irqrestore(&swapped.lock, flags);
 			if (aggressive_prefetch) {
 				/*
 				 * If we're prefetching aggressively and
@@ -527,12 +528,12 @@ static enum trickle_return trickle_swap(
 			 * not suitable for prefetching so skip it.
 			 */
 			entry = prev_swapped_entry(entry);
-			spin_unlock(&swapped.lock);
+			spin_unlock_irqrestore(&swapped.lock, flags);
 			continue;
 		}
 		swp_entry = entry->swp_entry;
 		entry = prev_swapped_entry(entry);
-		spin_unlock(&swapped.lock);
+		spin_unlock_irqrestore(&swapped.lock, flags);
 
 		if (trickle_swap_cache_async(swp_entry, node) ==
 			TRICKLE_DELAY && !aggressive_prefetch)