--- Documentation/sysctl/vm.txt | 36 + drivers/block/loop.c | 6 fs/mpage.c | 4 fs/nfsd/vfs.c | 6 include/linux/fs.h | 41 - include/linux/mm.h | 31 include/linux/page-flags.h | 5 include/linux/radix-tree.h | 82 ++ include/linux/sysctl.h | 2 include/linux/writeback.h | 6 kernel/sysctl.c | 28 lib/radix-tree.c | 208 +++++- mm/Kconfig | 55 + mm/filemap.c | 86 ++ mm/memory.c | 1 mm/page-writeback.c | 2 mm/page_alloc.c | 2 mm/readahead.c | 1519 +++++++++++++++++++++++++++++++++++++++++++- mm/swap.c | 2 mm/vmscan.c | 3 20 files changed, 2062 insertions(+), 63 deletions(-) Index: linux-2.6.16-ck1/Documentation/sysctl/vm.txt =================================================================== --- linux-2.6.16-ck1.orig/Documentation/sysctl/vm.txt 2006-03-20 20:47:01.000000000 +1100 +++ linux-2.6.16-ck1/Documentation/sysctl/vm.txt 2006-03-20 20:47:04.000000000 +1100 @@ -30,6 +30,8 @@ Currently, these files are in /proc/sys/ - zone_reclaim_mode - zone_reclaim_interval - swap_prefetch +- readahead_ratio +- readahead_hit_rate ============================================================== @@ -204,3 +206,37 @@ swap_prefetch unset and then it is enabl prefetched. The default value is 1. + +============================================================== + +readahead_ratio + +This limits readahead size to percent of the thrashing-threshold, +which is dynamicly estimated from the _history_ read speed and +system load, to deduce the _future_ readahead request size. + +Set it to a smaller value if you have not enough memory for all the +concurrent readers, or the I/O loads fluctuate a lot. But if there's +plenty of memory(>2MB per reader), enlarge it may help speedup reads. + +readahead_ratio also selects the readahead logic: +0: disable readahead totally +1-9: select the stock readahead logic +10-inf: select the adaptive readahead logic + +The default value is 50; reasonable values would be 50-100. + +============================================================== + +readahead_hit_rate + +This is the max allowed value of (readahead-pages : accessed-pages). +Useful only when (readahead_ratio >= 10). If the previous readahead +request has bad hit rate, the kernel will be reluctant to do the next +readahead. + +A larger value helps catch more sparse access patterns. Be aware that +readahead of the sparse patterns sacrifices memory for speed. + +The default value is 2. +It is recommended to keep the value below (max-readahead-pages / 8). Index: linux-2.6.16-ck1/drivers/block/loop.c =================================================================== --- linux-2.6.16-ck1.orig/drivers/block/loop.c 2006-03-20 20:46:23.000000000 +1100 +++ linux-2.6.16-ck1/drivers/block/loop.c 2006-03-20 20:47:04.000000000 +1100 @@ -779,6 +779,12 @@ static int loop_set_fd(struct loop_devic mapping = file->f_mapping; inode = mapping->host; + /* + * The upper layer should already do proper look-ahead, + * one more look-ahead here only ruins the cache hit rate. 
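The Documentation/sysctl/vm.txt hunk above only describes the two tunables in prose. As a concrete illustration, a userspace helper that switches a running kernel to the adaptive logic could look like the sketch below; it assumes the patch is applied with CONFIG_ADAPTIVE_READAHEAD enabled so that the /proc/sys/vm entries exist, and the helper name is made up:

#include <stdio.h>

/* Write one integer to a sysctl file under /proc/sys/vm/ (illustrative). */
static int write_vm_sysctl(const char *name, int value)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/vm/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", value);
	return fclose(f);
}

int main(void)
{
	write_vm_sysctl("readahead_ratio", 50);   /* >= 10 selects adaptive; 50 is the default */
	write_vm_sysctl("readahead_hit_rate", 2); /* 1 accessed page justifies 2 readahead pages */
	return 0;
}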
+ */ + file->f_ra.flags |= RA_FLAG_NO_LOOKAHEAD; + if (!(file->f_mode & FMODE_WRITE)) lo_flags |= LO_FLAGS_READ_ONLY; Index: linux-2.6.16-ck1/fs/mpage.c =================================================================== --- linux-2.6.16-ck1.orig/fs/mpage.c 2006-03-20 20:46:23.000000000 +1100 +++ linux-2.6.16-ck1/fs/mpage.c 2006-03-20 20:47:04.000000000 +1100 @@ -343,8 +343,10 @@ mpage_readpages(struct address_space *ma bio = do_mpage_readpage(bio, page, nr_pages - page_idx, &last_block_in_bio, get_block); - if (!pagevec_add(&lru_pvec, page)) + if (!pagevec_add(&lru_pvec, page)) { + cond_resched(); __pagevec_lru_add(&lru_pvec); + } } else { page_cache_release(page); } Index: linux-2.6.16-ck1/fs/nfsd/vfs.c =================================================================== --- linux-2.6.16-ck1.orig/fs/nfsd/vfs.c 2006-03-20 20:46:23.000000000 +1100 +++ linux-2.6.16-ck1/fs/nfsd/vfs.c 2006-03-20 20:47:04.000000000 +1100 @@ -833,10 +833,14 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st #endif /* Get readahead parameters */ - ra = nfsd_get_raparms(inode->i_sb->s_dev, inode->i_ino); + if (prefer_adaptive_readahead()) + ra = NULL; + else + ra = nfsd_get_raparms(inode->i_sb->s_dev, inode->i_ino); if (ra && ra->p_set) file->f_ra = ra->p_ra; + file->f_ra.flags |= RA_FLAG_NFSD; if (file->f_op->sendfile) { svc_pushback_unused_pages(rqstp); Index: linux-2.6.16-ck1/include/linux/fs.h =================================================================== --- linux-2.6.16-ck1.orig/include/linux/fs.h 2006-03-20 20:46:23.000000000 +1100 +++ linux-2.6.16-ck1/include/linux/fs.h 2006-03-20 20:47:04.000000000 +1100 @@ -600,19 +600,40 @@ struct fown_struct { * Track a single file's readahead state */ struct file_ra_state { - unsigned long start; /* Current window */ - unsigned long size; - unsigned long flags; /* ra flags RA_FLAG_xxx*/ - unsigned long cache_hit; /* cache hit count*/ - unsigned long prev_page; /* Cache last read() position */ - unsigned long ahead_start; /* Ahead window */ - unsigned long ahead_size; - unsigned long ra_pages; /* Maximum readahead window */ - unsigned long mmap_hit; /* Cache hit stat for mmap accesses */ - unsigned long mmap_miss; /* Cache miss stat for mmap accesses */ + union { + struct { /* conventional read-ahead */ + unsigned long start; /* Current window */ + unsigned long size; + unsigned long ahead_start; /* Ahead window */ + unsigned long ahead_size; + unsigned long cache_hit; /* cache hit count */ + }; +#ifdef CONFIG_ADAPTIVE_READAHEAD + struct { /* adaptive read-ahead */ + pgoff_t la_index; + pgoff_t ra_index; + pgoff_t lookahead_index; + pgoff_t readahead_index; + unsigned long age; + uint64_t cache_hits; + }; +#endif + }; + + /* mmap read-around */ + unsigned long mmap_hit; /* Cache hit stat for mmap accesses */ + unsigned long mmap_miss; /* Cache miss stat for mmap accesses */ + + /* common ones */ + unsigned long flags; /* ra flags RA_FLAG_xxx*/ + unsigned long prev_page; /* Cache last read() position */ + unsigned long ra_pages; /* Maximum readahead window */ }; #define RA_FLAG_MISS 0x01 /* a cache miss occured against this file */ #define RA_FLAG_INCACHE 0x02 /* file is already in cache */ +#define RA_FLAG_MMAP (1UL<<31) /* mmaped page access */ +#define RA_FLAG_NO_LOOKAHEAD (1UL<<30) /* disable look-ahead */ +#define RA_FLAG_NFSD (1UL<<29) /* request from nfsd */ struct file { /* Index: linux-2.6.16-ck1/include/linux/mm.h =================================================================== --- linux-2.6.16-ck1.orig/include/linux/mm.h 2006-03-20 20:46:23.000000000 +1100 
+++ linux-2.6.16-ck1/include/linux/mm.h 2006-03-20 20:47:04.000000000 +1100 @@ -954,7 +954,11 @@ extern int filemap_populate(struct vm_ar int write_one_page(struct page *page, int wait); /* readahead.c */ +#ifdef CONFIG_ADAPTIVE_READAHEAD +#define VM_MAX_READAHEAD 1024 /* kbytes */ +#else #define VM_MAX_READAHEAD 128 /* kbytes */ +#endif #define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ #define VM_MAX_CACHE_HIT 256 /* max pages in a row in cache before * turning readahead off */ @@ -971,6 +975,33 @@ unsigned long page_cache_readahead(struc void handle_ra_miss(struct address_space *mapping, struct file_ra_state *ra, pgoff_t offset); unsigned long max_sane_readahead(unsigned long nr); +unsigned long +page_cache_readahead_adaptive(struct address_space *mapping, + struct file_ra_state *ra, struct file *filp, + struct page *prev_page, struct page *page, + pgoff_t first_index, pgoff_t index, pgoff_t last_index); + +#ifdef CONFIG_ADAPTIVE_READAHEAD +void fastcall readahead_cache_hit(struct file_ra_state *ra, struct page *page); +extern int readahead_ratio; +#else +#define readahead_cache_hit(ra, page) do { } while (0) +#define readahead_ratio 1 +#endif /* CONFIG_ADAPTIVE_READAHEAD */ + +static inline int prefer_adaptive_readahead(void) +{ + return readahead_ratio >= 10; +} + +DECLARE_PER_CPU(unsigned long, readahead_aging); +static inline void inc_readahead_aging(void) +{ + if (prefer_adaptive_readahead()) { + per_cpu(readahead_aging, get_cpu())++; + put_cpu(); + } +} /* Do stack extension */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); Index: linux-2.6.16-ck1/include/linux/page-flags.h =================================================================== --- linux-2.6.16-ck1.orig/include/linux/page-flags.h 2006-03-20 20:46:23.000000000 +1100 +++ linux-2.6.16-ck1/include/linux/page-flags.h 2006-03-20 20:47:04.000000000 +1100 @@ -75,6 +75,7 @@ #define PG_reclaim 17 /* To be reclaimed asap */ #define PG_nosave_free 18 /* Free, should not be written */ #define PG_uncached 19 /* Page has been mapped as uncached */ +#define PG_readahead 20 /* Reminder to do readahead */ /* * Global page accounting. One instance per CPU. Only unsigned longs are @@ -344,6 +345,10 @@ extern void __mod_page_state_offset(unsi #define SetPageUncached(page) set_bit(PG_uncached, &(page)->flags) #define ClearPageUncached(page) clear_bit(PG_uncached, &(page)->flags) +#define PageReadahead(page) test_bit(PG_readahead, &(page)->flags) +#define __SetPageReadahead(page) __set_bit(PG_readahead, &(page)->flags) +#define TestClearPageReadahead(page) test_and_clear_bit(PG_readahead, &(page)->flags) + struct page; /* forward declaration */ int test_clear_page_dirty(struct page *page); Index: linux-2.6.16-ck1/include/linux/radix-tree.h =================================================================== --- linux-2.6.16-ck1.orig/include/linux/radix-tree.h 2006-03-20 20:46:23.000000000 +1100 +++ linux-2.6.16-ck1/include/linux/radix-tree.h 2006-03-20 20:47:04.000000000 +1100 @@ -23,12 +23,24 @@ #include #include +#define RADIX_TREE_MAP_SHIFT 6 +#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) +#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) + struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; }; +/* + * Lookaside cache to support access patterns with strong locality. 
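The mm.h hunk above declares both entry points and the prefer_adaptive_readahead() gate that every call site in this patch uses to choose between them. A minimal sketch of that gating pattern follows; the wrapper name and the choice of passing NULL for prev_page are illustrative only:

#include <linux/fs.h>
#include <linux/mm.h>

/* Illustrative wrapper: readahead_ratio >= 10 routes the request to the
 * adaptive logic, anything below keeps the stock window-based code. */
static unsigned long my_trigger_readahead(struct address_space *mapping,
					  struct file_ra_state *ra,
					  struct file *filp, struct page *page,
					  pgoff_t index, unsigned long req_size)
{
	if (prefer_adaptive_readahead())
		return page_cache_readahead_adaptive(mapping, ra, filp,
						     NULL, page, index,
						     index, index + req_size);

	return page_cache_readahead(mapping, ra, filp, index, req_size);
}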
+ */ +struct radix_tree_cache { + unsigned long first_index; + struct radix_tree_node *tree_node; +}; + #define RADIX_TREE_INIT(mask) { \ .height = 0, \ .gfp_mask = (mask), \ @@ -46,9 +58,18 @@ do { \ } while (0) int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); -void *radix_tree_lookup(struct radix_tree_root *, unsigned long); -void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); +void *radix_tree_lookup_node(struct radix_tree_root *, unsigned long, + unsigned int); +void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long); void *radix_tree_delete(struct radix_tree_root *, unsigned long); +unsigned int radix_tree_cache_count(struct radix_tree_cache *cache); +void *radix_tree_cache_lookup_node(struct radix_tree_root *root, + struct radix_tree_cache *cache, + unsigned long index, unsigned int level); +unsigned long radix_tree_scan_hole_backward(struct radix_tree_root *root, + unsigned long index, unsigned long max_scan); +unsigned long radix_tree_scan_hole(struct radix_tree_root *root, + unsigned long index, unsigned long max_scan); unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items); @@ -70,4 +91,61 @@ static inline void radix_tree_preload_en preempt_enable(); } +/** + * radix_tree_lookup - perform lookup operation on a radix tree + * @root: radix tree root + * @index: index key + * + * Lookup the item at the position @index in the radix tree @root. + */ +static inline void *radix_tree_lookup(struct radix_tree_root *root, + unsigned long index) +{ + return radix_tree_lookup_node(root, index, 0); +} + +/** + * radix_tree_cache_init - init a look-aside cache + * @cache: look-aside cache + * + * Init the radix tree look-aside cache @cache. + */ +static inline void radix_tree_cache_init(struct radix_tree_cache *cache) +{ + cache->first_index = RADIX_TREE_MAP_MASK; + cache->tree_node = NULL; +} + +/** + * radix_tree_cache_lookup - cached lookup on a radix tree + * @root: radix tree root + * @cache: look-aside cache + * @index: index key + * + * Lookup the item at the position @index in the radix tree @root, + * and make use of @cache to speedup the lookup process. 
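The intended usage pattern is: initialise the cache once, then pass it to every lookup of a scan performed under the tree lock, since the cached node is stale as soon as the lock is dropped (see the NOTE on radix_tree_cache_lookup_node() further down). A minimal sketch modelled on the mm/readahead.c callers later in this patch; the helper name is illustrative:

#include <linux/fs.h>
#include <linux/radix-tree.h>

/* Count how many pages in [index, index + nr) are already cached, using
 * the look-aside cache so that adjacent lookups share one leaf node.
 * Must not be used across a release of mapping->tree_lock. */
static unsigned long count_present_pages(struct address_space *mapping,
					 pgoff_t index, unsigned long nr)
{
	struct radix_tree_cache cache;
	unsigned long i, count = 0;

	radix_tree_cache_init(&cache);
	read_lock_irq(&mapping->tree_lock);
	for (i = 0; i < nr; i++)
		if (radix_tree_cache_lookup(&mapping->page_tree,
					    &cache, index + i))
			count++;
	read_unlock_irq(&mapping->tree_lock);

	return count;
}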
+ */ +static inline void *radix_tree_cache_lookup(struct radix_tree_root *root, + struct radix_tree_cache *cache, + unsigned long index) +{ + return radix_tree_cache_lookup_node(root, cache, index, 0); +} + +static inline unsigned int radix_tree_cache_size(struct radix_tree_cache *cache) +{ + return RADIX_TREE_MAP_SIZE; +} + +static inline int radix_tree_cache_full(struct radix_tree_cache *cache) +{ + return radix_tree_cache_count(cache) == radix_tree_cache_size(cache); +} + +static inline unsigned long +radix_tree_cache_first_index(struct radix_tree_cache *cache) +{ + return cache->first_index; +} + #endif /* _LINUX_RADIX_TREE_H */ Index: linux-2.6.16-ck1/include/linux/sysctl.h =================================================================== --- linux-2.6.16-ck1.orig/include/linux/sysctl.h 2006-03-20 20:46:56.000000000 +1100 +++ linux-2.6.16-ck1/include/linux/sysctl.h 2006-03-20 20:47:04.000000000 +1100 @@ -191,6 +191,8 @@ enum VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */ VM_SWAP_PREFETCH=33, /* swap prefetch */ VM_HARDMAPLIMIT=34, /* Make mapped a hard limit */ + VM_READAHEAD_RATIO=35, /* percent of read-ahead size to thrashing-threshold */ + VM_READAHEAD_HIT_RATE=36, /* one accessed page legitimizes so many read-ahead pages */ }; Index: linux-2.6.16-ck1/include/linux/writeback.h =================================================================== --- linux-2.6.16-ck1.orig/include/linux/writeback.h 2006-03-20 20:46:23.000000000 +1100 +++ linux-2.6.16-ck1/include/linux/writeback.h 2006-03-20 20:47:04.000000000 +1100 @@ -85,6 +85,12 @@ void laptop_io_completion(void); void laptop_sync_completion(void); void throttle_vm_writeout(void); +extern struct timer_list laptop_mode_wb_timer; +static inline int laptop_spinned_down(void) +{ + return !timer_pending(&laptop_mode_wb_timer); +} + /* These are exported to sysctl. 
*/ extern int dirty_background_ratio; extern int vm_dirty_ratio; Index: linux-2.6.16-ck1/kernel/sysctl.c =================================================================== --- linux-2.6.16-ck1.orig/kernel/sysctl.c 2006-03-20 20:46:56.000000000 +1100 +++ linux-2.6.16-ck1/kernel/sysctl.c 2006-03-20 20:47:04.000000000 +1100 @@ -74,6 +74,12 @@ extern int pid_max_min, pid_max_max; extern int sysctl_drop_caches; extern int percpu_pagelist_fraction; +#if defined(CONFIG_ADAPTIVE_READAHEAD) +extern int readahead_ratio; +extern int readahead_hit_rate; +static int one = 1; +#endif + #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) int unknown_nmi_panic; extern int proc_unknown_nmi_panic(ctl_table *, int, struct file *, @@ -961,6 +967,28 @@ static ctl_table vm_table[] = { .proc_handler = &proc_dointvec, }, #endif +#ifdef CONFIG_ADAPTIVE_READAHEAD + { + .ctl_name = VM_READAHEAD_RATIO, + .procname = "readahead_ratio", + .data = &readahead_ratio, + .maxlen = sizeof(readahead_ratio), + .mode = 0644, + .proc_handler = &proc_dointvec, + .strategy = &sysctl_intvec, + .extra1 = &zero, + }, + { + .ctl_name = VM_READAHEAD_HIT_RATE, + .procname = "readahead_hit_rate", + .data = &readahead_hit_rate, + .maxlen = sizeof(readahead_hit_rate), + .mode = 0644, + .proc_handler = &proc_dointvec, + .strategy = &sysctl_intvec, + .extra1 = &one, + }, +#endif { .ctl_name = 0 } }; Index: linux-2.6.16-ck1/lib/radix-tree.c =================================================================== --- linux-2.6.16-ck1.orig/lib/radix-tree.c 2006-03-20 20:46:23.000000000 +1100 +++ linux-2.6.16-ck1/lib/radix-tree.c 2006-03-20 20:47:04.000000000 +1100 @@ -32,16 +32,7 @@ #include -#ifdef __KERNEL__ -#define RADIX_TREE_MAP_SHIFT 6 -#else -#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */ -#endif #define RADIX_TREE_TAGS 2 - -#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) -#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) - #define RADIX_TREE_TAG_LONGS \ ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG) @@ -286,32 +277,89 @@ int radix_tree_insert(struct radix_tree_ } EXPORT_SYMBOL(radix_tree_insert); -static inline void **__lookup_slot(struct radix_tree_root *root, - unsigned long index) +/** + * radix_tree_lookup_node - low level lookup routine + * @root: radix tree root + * @index: index key + * @level: stop at that many levels from the tree leaf + * + * Lookup the item at the position @index in the radix tree @root. + * The return value is: + * @level == 0: page at @index; + * @level == 1: the corresponding bottom level tree node; + * @level < height: (@level-1)th parent node of the bottom node + * that contains @index; + * @level >= height: the root node. 
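To make the @level convention concrete: @level == 0 yields the stored item (what radix_tree_lookup() returns), while @level == 1 yields the bottom tree node whose slots[] array holds that item. The two relate as in the sketch below, which is only valid inside lib/radix-tree.c because struct radix_tree_node is opaque elsewhere; RADIX_TREE_MAP_MASK is exported by the radix-tree.h hunk above:

/* Sketch: a level-0 lookup expressed through a level-1 lookup. */
static void *lookup_via_leaf(struct radix_tree_root *root, unsigned long index)
{
	struct radix_tree_node *leaf;

	/* level 1: the bottom-level node covering @index, or NULL */
	leaf = radix_tree_lookup_node(root, index, 1);
	if (!leaf)
		return NULL;

	/* level 0 is then one more array dereference */
	return leaf->slots[index & RADIX_TREE_MAP_MASK];
}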
+ */ +void *radix_tree_lookup_node(struct radix_tree_root *root, + unsigned long index, unsigned int level) { unsigned int height, shift; - struct radix_tree_node **slot; + struct radix_tree_node *slot; height = root->height; if (index > radix_tree_maxindex(height)) return NULL; shift = (height-1) * RADIX_TREE_MAP_SHIFT; - slot = &root->rnode; + slot = root->rnode; - while (height > 0) { - if (*slot == NULL) + while (height > level) { + if (slot == NULL) return NULL; - slot = (struct radix_tree_node **) - ((*slot)->slots + - ((index >> shift) & RADIX_TREE_MAP_MASK)); + slot = slot->slots[(index >> shift) & RADIX_TREE_MAP_MASK]; shift -= RADIX_TREE_MAP_SHIFT; height--; } - return (void **)slot; + return slot; +} +EXPORT_SYMBOL(radix_tree_lookup_node); + +/** + * radix_tree_cache_lookup_node - cached lookup node + * @root: radix tree root + * @cache: look-aside cache + * @index: index key + * + * Lookup the item at the position @index in the radix tree @root, + * and return the node @level levels from the bottom in the search path. + * + * @cache stores the last accessed upper level tree node by this + * function, and is always checked first before searching in the tree. + * It can improve speed for access patterns with strong locality. + * + * NOTE: + * - The cache becomes invalid on leaving the lock; + * - Do not intermix calls with different @level. + */ +void *radix_tree_cache_lookup_node(struct radix_tree_root *root, + struct radix_tree_cache *cache, + unsigned long index, unsigned int level) +{ + struct radix_tree_node *node; + unsigned long i; + unsigned long mask; + + if (level >= root->height) + return root->rnode; + + i = ((index >> (level * RADIX_TREE_MAP_SHIFT)) & RADIX_TREE_MAP_MASK); + mask = ~((RADIX_TREE_MAP_SIZE << (level * RADIX_TREE_MAP_SHIFT)) - 1); + + if ((index & mask) == cache->first_index) + return cache->tree_node->slots[i]; + + node = radix_tree_lookup_node(root, index, level + 1); + if (!node) + return 0; + + cache->tree_node = node; + cache->first_index = (index & mask); + return node->slots[i]; } +EXPORT_SYMBOL(radix_tree_cache_lookup_node); /** * radix_tree_lookup_slot - lookup a slot in a radix tree @@ -323,25 +371,131 @@ static inline void **__lookup_slot(struc */ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) { - return __lookup_slot(root, index); + struct radix_tree_node *node; + + node = radix_tree_lookup_node(root, index, 1); + return node->slots + (index & RADIX_TREE_MAP_MASK); } EXPORT_SYMBOL(radix_tree_lookup_slot); /** - * radix_tree_lookup - perform lookup operation on a radix tree + * radix_tree_cache_count - items in the cached node + * @cache: radix tree look-aside cache + * + * Query the number of items contained in the cached node. + */ +unsigned int radix_tree_cache_count(struct radix_tree_cache *cache) +{ + if (!(cache->first_index & RADIX_TREE_MAP_MASK)) + return cache->tree_node->count; + else + return 0; +} +EXPORT_SYMBOL(radix_tree_cache_count); + +/** + * radix_tree_scan_hole_backward - scan backward for hole * @root: radix tree root * @index: index key + * @max_scan: advice on max items to scan (it may scan a little more) * - * Lookup the item at the position @index in the radix tree @root. + * Scan backward from @index for a hole/empty item, stop when + * - hit hole + * - @max_scan or more items scanned + * - hit index 0 + * + * Return the correponding index. 
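The backward scan is what the readahead heuristics later in this patch use to measure the contiguous run of cached pages that ends at the current read position (see query_page_cache_segment() in mm/readahead.c). A minimal illustrative wrapper, with a made-up name, following the same locking convention as that caller:

#include <linux/fs.h>
#include <linux/radix-tree.h>

/* Length of the contiguous cached run ending at @index, looking back at
 * most @max pages.  Caller holds mapping->tree_lock for reading. */
static unsigned long cached_run_length(struct address_space *mapping,
				       pgoff_t index, unsigned long max)
{
	unsigned long hole;

	hole = radix_tree_scan_hole_backward(&mapping->page_tree, index, max);
	return index - hole;
}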
*/ -void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) +unsigned long radix_tree_scan_hole_backward(struct radix_tree_root *root, + unsigned long index, unsigned long max_scan) { - void **slot; + struct radix_tree_cache cache; + struct radix_tree_node *node; + unsigned long origin; + int i; + + origin = index; + radix_tree_cache_init(&cache); + + while (origin - index < max_scan) { + node = radix_tree_cache_lookup_node(root, &cache, index, 1); + if (!node) + break; + + if (node->count == RADIX_TREE_MAP_SIZE) { + index = (index - RADIX_TREE_MAP_SIZE) | + RADIX_TREE_MAP_MASK; + goto check_underflow; + } + + for (i = index & RADIX_TREE_MAP_MASK; i >= 0; i--, index--) { + if (!node->slots[i]) + goto out; + } + +check_underflow: + if (unlikely(index == ULONG_MAX)) { + index = 0; + break; + } + } + +out: + return index; +} +EXPORT_SYMBOL(radix_tree_scan_hole_backward); - slot = __lookup_slot(root, index); - return slot != NULL ? *slot : NULL; +/** + * radix_tree_scan_hole - scan for hole + * @root: radix tree root + * @index: index key + * @max_scan: advice on max items to scan (it may scan a little more) + * + * Scan forward from @index for a hole/empty item, stop when + * - hit hole + * - hit EOF + * - hit index ULONG_MAX + * - @max_scan or more items scanned + * + * Return the correponding index. + */ +unsigned long radix_tree_scan_hole(struct radix_tree_root *root, + unsigned long index, unsigned long max_scan) +{ + struct radix_tree_cache cache; + struct radix_tree_node *node; + unsigned long origin; + int i; + + origin = index; + radix_tree_cache_init(&cache); + + while (index - origin < max_scan) { + node = radix_tree_cache_lookup_node(root, &cache, index, 1); + if (!node) + break; + + if (node->count == RADIX_TREE_MAP_SIZE) { + index = (index | RADIX_TREE_MAP_MASK) + 1; + goto check_overflow; + } + + for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; + i++, index++) { + if (!node->slots[i]) + goto out; + } + +check_overflow: + if (unlikely(!index)) { + index = ULONG_MAX; + break; + } + } +out: + return index; } -EXPORT_SYMBOL(radix_tree_lookup); +EXPORT_SYMBOL(radix_tree_scan_hole); /** * radix_tree_tag_set - set a tag on a radix tree node Index: linux-2.6.16-ck1/mm/Kconfig =================================================================== --- linux-2.6.16-ck1.orig/mm/Kconfig 2006-03-20 20:46:23.000000000 +1100 +++ linux-2.6.16-ck1/mm/Kconfig 2006-03-20 20:47:04.000000000 +1100 @@ -139,3 +139,58 @@ config SPLIT_PTLOCK_CPUS config MIGRATION def_bool y if NUMA || SPARSEMEM || DISCONTIGMEM depends on SWAP + +# +# Adaptive file readahead +# +config ADAPTIVE_READAHEAD + bool "Adaptive file readahead (EXPERIMENTAL)" + default n + depends on EXPERIMENTAL + help + Readahead is a technique employed by the kernel in an attempt + to improve file reading performance. If the kernel has reason + to believe that a particular file is being read sequentially, + it will attempt to read blocks from the file into memory before + the application requests them. When readahead works, it speeds + up the system's throughput, since the reading application does + not have to wait for its requests. When readahead fails, instead, + it generates useless I/O and occupies memory pages which are + needed for some other purpose. For sequential readings, + + Normally, the kernel uses a stock readahead logic that is well + understood and well tuned. This option enables a much complex and + feature rich one. 
It is more aggressive and memory efficient in + doing readahead, and supports some less-common access patterns such + as reading backward and reading sparsely. However, due to the great + diversity of real world applications, it might not fit everyone. + + Please refer to Documentation/sysctl/vm.txt for tunable parameters. + + Say Y here if you are building kernel for file servers. + Say N if you are unsure. + +config DEBUG_READAHEAD + bool "Readahead debug and accounting" + default n + depends on ADAPTIVE_READAHEAD + select DEBUG_FS + help + This option injects extra code to dump detailed debug traces and do + readahead events accounting. + + To actually get the data: + + mkdir /debug + mount -t debug none /debug + + After that you can do the following: + + echo > /debug/readahead/events # reset the counters + cat /debug/readahead/events # check the counters + + echo 1 > /debug/readahead/debug_level # show printk traces + echo 2 > /debug/readahead/debug_level # show verbose printk traces + echo 0 > /debug/readahead/debug_level # stop filling my kern.log + + Say N, unless you have readahead performance problems. Index: linux-2.6.16-ck1/mm/filemap.c =================================================================== --- linux-2.6.16-ck1.orig/mm/filemap.c 2006-03-20 20:46:23.000000000 +1100 +++ linux-2.6.16-ck1/mm/filemap.c 2006-03-20 20:47:04.000000000 +1100 @@ -42,6 +42,12 @@ static ssize_t generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs); +#ifdef CONFIG_DEBUG_READAHEAD +extern u32 readahead_debug_level; +#else +#define readahead_debug_level 0 +#endif /* CONFIG_DEBUG_READAHEAD */ + /* * Shared mappings implemented 30.11.1994. It's not fully working yet, * though. @@ -746,10 +752,12 @@ void do_generic_mapping_read(struct addr unsigned long prev_index; loff_t isize; struct page *cached_page; + struct page *prev_page; int error; struct file_ra_state ra = *_ra; cached_page = NULL; + prev_page = NULL; index = *ppos >> PAGE_CACHE_SHIFT; next_index = index; prev_index = ra.prev_page; @@ -760,6 +768,10 @@ void do_generic_mapping_read(struct addr if (!isize) goto out; + if (readahead_debug_level >= 5) + printk(KERN_DEBUG "read-file(ino=%lu, req=%lu+%lu)\n", + inode->i_ino, index, last_index - index); + end_index = (isize - 1) >> PAGE_CACHE_SHIFT; for (;;) { struct page *page; @@ -778,16 +790,45 @@ void do_generic_mapping_read(struct addr nr = nr - offset; cond_resched(); - if (index == next_index) + + if (!prefer_adaptive_readahead() && index == next_index) next_index = page_cache_readahead(mapping, &ra, filp, index, last_index - index); find_page: page = find_get_page(mapping, index); + if (prefer_adaptive_readahead()) { + if (unlikely(page == NULL)) { + ra.prev_page = prev_index; + page_cache_readahead_adaptive(mapping, &ra, + filp, prev_page, NULL, + *ppos >> PAGE_CACHE_SHIFT, + index, last_index); + page = find_get_page(mapping, index); + } else if (PageReadahead(page)) { + ra.prev_page = prev_index; + page_cache_readahead_adaptive(mapping, &ra, + filp, prev_page, page, + *ppos >> PAGE_CACHE_SHIFT, + index, last_index); + } + } if (unlikely(page == NULL)) { - handle_ra_miss(mapping, &ra, index); + if (!prefer_adaptive_readahead()) + handle_ra_miss(mapping, &ra, index); goto no_cached_page; } + + if (prev_page) + page_cache_release(prev_page); + prev_page = page; + + readahead_cache_hit(&ra, page); + if (readahead_debug_level >= 7) + printk(KERN_DEBUG "read-page(ino=%lu, idx=%lu, io=%s)\n", + inode->i_ino, index, + 
PageUptodate(page) ? "hit" : "miss"); + if (!PageUptodate(page)) goto page_not_up_to_date; page_ok: @@ -822,7 +863,6 @@ page_ok: index += offset >> PAGE_CACHE_SHIFT; offset &= ~PAGE_CACHE_MASK; - page_cache_release(page); if (ret == nr && desc->count) continue; goto out; @@ -834,7 +874,6 @@ page_not_up_to_date: /* Did it get unhashed before we got the lock? */ if (!page->mapping) { unlock_page(page); - page_cache_release(page); continue; } @@ -864,7 +903,6 @@ readpage: * invalidate_inode_pages got it */ unlock_page(page); - page_cache_release(page); goto find_page; } unlock_page(page); @@ -885,7 +923,6 @@ readpage: isize = i_size_read(inode); end_index = (isize - 1) >> PAGE_CACHE_SHIFT; if (unlikely(!isize || index > end_index)) { - page_cache_release(page); goto out; } @@ -894,7 +931,6 @@ readpage: if (index == end_index) { nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; if (nr <= offset) { - page_cache_release(page); goto out; } } @@ -904,7 +940,6 @@ readpage: readpage_error: /* UHHUH! A synchronous read error occurred. Report it */ desc->error = error; - page_cache_release(page); goto out; no_cached_page: @@ -929,15 +964,22 @@ no_cached_page: } page = cached_page; cached_page = NULL; + if (prev_page) + page_cache_release(prev_page); + prev_page = page; goto readpage; } out: *_ra = ra; + if (prefer_adaptive_readahead()) + _ra->prev_page = prev_index; *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; if (cached_page) page_cache_release(cached_page); + if (prev_page) + page_cache_release(prev_page); if (filp) file_accessed(filp); } @@ -1216,6 +1258,7 @@ struct page *filemap_nopage(struct vm_ar unsigned long size, pgoff; int did_readaround = 0, majmin = VM_FAULT_MINOR; + ra->flags |= RA_FLAG_MMAP; pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff; retry_all: @@ -1233,7 +1276,7 @@ retry_all: * * For sequential accesses, we use the generic readahead logic. */ - if (VM_SequentialReadHint(area)) + if (!prefer_adaptive_readahead() && VM_SequentialReadHint(area)) page_cache_readahead(mapping, ra, file, pgoff, 1); /* @@ -1241,11 +1284,24 @@ retry_all: */ retry_find: page = find_get_page(mapping, pgoff); + if (prefer_adaptive_readahead() && VM_SequentialReadHint(area)) { + if (!page) { + page_cache_readahead_adaptive(mapping, ra, + file, NULL, NULL, + pgoff, pgoff, pgoff + 1); + page = find_get_page(mapping, pgoff); + } else if (PageReadahead(page)) { + page_cache_readahead_adaptive(mapping, ra, + file, NULL, page, + pgoff, pgoff, pgoff + 1); + } + } if (!page) { unsigned long ra_pages; if (VM_SequentialReadHint(area)) { - handle_ra_miss(mapping, ra, pgoff); + if (!prefer_adaptive_readahead()) + handle_ra_miss(mapping, ra, pgoff); goto no_cached_page; } ra->mmap_miss++; @@ -1282,6 +1338,14 @@ retry_find: if (!did_readaround) ra->mmap_hit++; + readahead_cache_hit(ra, page); + if (readahead_debug_level >= 6) + printk(KERN_DEBUG "read-mmap(ino=%lu, idx=%lu, hint=%s, io=%s)\n", + inode->i_ino, pgoff, + VM_RandomReadHint(area) ? "random" : + (VM_SequentialReadHint(area) ? "sequential" : "none"), + PageUptodate(page) ? "hit" : "miss"); + /* * Ok, found a page in the page cache, now we need to check * that it's up-to-date. 
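The filemap.c hunks above hook the adaptive logic into both the read() and the mmap fault path with the same two triggers: a missing page forces a synchronous readahead before retrying the lookup, and a cached page that carries PG_readahead kicks off the next chunk ahead of time. Condensed into one illustrative helper (not part of the patch; prev_page is passed as NULL the way filemap_nopage() does):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* The two adaptive-readahead trigger points used by
 * do_generic_mapping_read() and filemap_nopage() above. */
static struct page *find_page_with_readahead(struct address_space *mapping,
					     struct file_ra_state *ra,
					     struct file *filp, pgoff_t index,
					     pgoff_t first, pgoff_t last)
{
	struct page *page = find_get_page(mapping, index);

	if (!page) {
		/* cache miss: read ahead now, then look the page up again */
		page_cache_readahead_adaptive(mapping, ra, filp, NULL, NULL,
					      first, index, last);
		page = find_get_page(mapping, index);
	} else if (PageReadahead(page)) {
		/* look-ahead mark hit: pipeline the next chunk */
		page_cache_readahead_adaptive(mapping, ra, filp, NULL, page,
					      first, index, last);
	}
	return page;		/* referenced page, or NULL */
}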
@@ -1296,6 +1360,8 @@ success: mark_page_accessed(page); if (type) *type = majmin; + if (prefer_adaptive_readahead()) + ra->prev_page = page->index; return page; outside_data_content: Index: linux-2.6.16-ck1/mm/memory.c =================================================================== --- linux-2.6.16-ck1.orig/mm/memory.c 2006-03-20 20:46:23.000000000 +1100 +++ linux-2.6.16-ck1/mm/memory.c 2006-03-20 20:47:04.000000000 +1100 @@ -1993,6 +1993,7 @@ static int do_anonymous_page(struct mm_s page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (!pte_none(*page_table)) goto release; + inc_readahead_aging(); inc_mm_counter(mm, anon_rss); lru_cache_add_active(page); page_add_new_anon_rmap(page, vma, address); Index: linux-2.6.16-ck1/mm/page-writeback.c =================================================================== --- linux-2.6.16-ck1.orig/mm/page-writeback.c 2006-03-20 20:46:53.000000000 +1100 +++ linux-2.6.16-ck1/mm/page-writeback.c 2006-03-20 20:47:04.000000000 +1100 @@ -370,7 +370,7 @@ static void wb_timer_fn(unsigned long un static void laptop_timer_fn(unsigned long unused); static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0); -static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0); +DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0); /* * Periodic writeback of "old" data. Index: linux-2.6.16-ck1/mm/page_alloc.c =================================================================== --- linux-2.6.16-ck1.orig/mm/page_alloc.c 2006-03-20 20:46:59.000000000 +1100 +++ linux-2.6.16-ck1/mm/page_alloc.c 2006-03-20 20:47:04.000000000 +1100 @@ -532,7 +532,7 @@ static int prep_new_page(struct page *pa if (PageReserved(page)) return 1; - page->flags &= ~(1 << PG_uptodate | 1 << PG_error | + page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_readahead | 1 << PG_referenced | 1 << PG_arch_1 | 1 << PG_checked | 1 << PG_mappedtodisk); set_page_private(page, 0); Index: linux-2.6.16-ck1/mm/readahead.c =================================================================== --- linux-2.6.16-ck1.orig/mm/readahead.c 2006-03-20 20:46:23.000000000 +1100 +++ linux-2.6.16-ck1/mm/readahead.c 2006-03-20 20:47:04.000000000 +1100 @@ -14,6 +14,300 @@ #include #include #include +#include +#include +#include + +/* The default max/min read-ahead pages. */ +#define KB(size) (((size)*1024 + PAGE_CACHE_SIZE-1) / PAGE_CACHE_SIZE) +#define MAX_RA_PAGES KB(VM_MAX_READAHEAD) +#define MIN_RA_PAGES KB(VM_MIN_READAHEAD) +#define MIN_NFSD_PAGES KB(NFSSVC_MAXBLKSIZE/1024) + +#define next_page(pg) (list_entry((pg)->lru.prev, struct page, lru)) +#define prev_page(pg) (list_entry((pg)->lru.next, struct page, lru)) + +#ifdef CONFIG_ADAPTIVE_READAHEAD +/* + * Adaptive read-ahead parameters. + */ + +/* In laptop mode, poll delayed look-ahead on every ## pages read. */ +#define LAPTOP_POLL_INTERVAL 16 + +/* Set look-ahead size to 1/# of the thrashing-threshold. */ +#define LOOKAHEAD_RATIO 8 + +/* Set read-ahead size to ##% of the thrashing-threshold. */ +int readahead_ratio = 50; +EXPORT_SYMBOL(readahead_ratio); + +/* Readahead as long as cache hit ratio keeps above 1/##. */ +int readahead_hit_rate = 2; +EXPORT_SYMBOL(readahead_hit_rate); + +/* + * Measures the aging process of cold pages. + * Mainly increased on fresh page references to make it smooth. + */ +DEFINE_PER_CPU(unsigned long, readahead_aging); +EXPORT_PER_CPU_SYMBOL(readahead_aging); + +/* + * Detailed classification of read-ahead behaviors. 
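The classes declared just below are packed into the low byte of file_ra_state.flags: the current class occupies bits 0-3 and the previous class is shifted into bits 4-7 whenever a new one is installed (see ra_set_class() further down). A self-contained demo of that packing, using a plain unsigned long in place of the real structure:

#include <stdio.h>

#define RA_CLASS_SHIFT	4
#define RA_CLASS_MASK	((1UL << RA_CLASS_SHIFT) - 1)

/* Mirror of ra_set_class(): keep the other flag bits, remember the old
 * class in bits 4-7 and store the new class in bits 0-3. */
static unsigned long set_class(unsigned long flags, unsigned long class)
{
	unsigned long keep = flags &
		~(RA_CLASS_MASK | (RA_CLASS_MASK << RA_CLASS_SHIFT));
	unsigned long prev = (flags & RA_CLASS_MASK) << RA_CLASS_SHIFT;

	return keep | prev | class;
}

int main(void)
{
	unsigned long flags = 0;

	flags = set_class(flags, 2);	/* RA_CLASS_STATE */
	flags = set_class(flags, 3);	/* RA_CLASS_CONTEXT */
	printf("current=%lu previous=%lu\n", flags & RA_CLASS_MASK,
	       (flags >> RA_CLASS_SHIFT) & RA_CLASS_MASK);
	/* prints: current=3 previous=2 */
	return 0;
}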
+ */ +#define RA_CLASS_SHIFT 4 +#define RA_CLASS_MASK ((1 << RA_CLASS_SHIFT) - 1) +enum ra_class { + RA_CLASS_ALL, + RA_CLASS_NEWFILE, + RA_CLASS_STATE, + RA_CLASS_CONTEXT, + RA_CLASS_CONTEXT_AGGRESSIVE, + RA_CLASS_BACKWARD, + RA_CLASS_THRASHING, + RA_CLASS_SEEK, + RA_CLASS_END, +}; +#endif /* CONFIG_ADAPTIVE_READAHEAD */ + +/* + * Read-ahead events accounting. + */ +#ifdef CONFIG_DEBUG_READAHEAD +#include +#include +#include +#include + +#define DEBUG_READAHEAD_RADIXTREE + +/* Read-ahead events to be accounted. */ +enum ra_event { + RA_EVENT_CACHE_MISS, /* read cache misses */ + RA_EVENT_READRANDOM, /* random reads */ + RA_EVENT_IO_CONGESTION, /* io congestion */ + RA_EVENT_IO_CACHE_HIT, /* canceled io due to cache hit */ + RA_EVENT_IO_BLOCK, /* read on locked page */ + + RA_EVENT_READAHEAD, /* read-ahead issued */ + RA_EVENT_READAHEAD_HIT, /* read-ahead page hit */ + RA_EVENT_LOOKAHEAD, /* look-ahead issued */ + RA_EVENT_LOOKAHEAD_HIT, /* look-ahead mark hit */ + RA_EVENT_LOOKAHEAD_NOACTION, /* look-ahead mark ignored */ + RA_EVENT_READAHEAD_MMAP, /* read-ahead for memory mapped file */ + RA_EVENT_READAHEAD_EOF, /* read-ahead reaches EOF */ + RA_EVENT_READAHEAD_SHRINK, /* ra_size under previous la_size */ + RA_EVENT_READAHEAD_THRASHING, /* read-ahead thrashing happened */ + RA_EVENT_READAHEAD_MUTILATE, /* read-ahead request mutilated */ + RA_EVENT_READAHEAD_RESCUE, /* read-ahead rescued */ + + RA_EVENT_END +}; + +static const char * const ra_event_name[] = { + "cache_miss", + "read_random", + "io_congestion", + "io_cache_hit", + "io_block", + "readahead", + "readahead_hit", + "lookahead", + "lookahead_hit", + "lookahead_ignore", + "readahead_mmap", + "readahead_eof", + "readahead_shrink", + "readahead_thrash", + "readahead_mutilt", + "readahead_rescue", +}; + +static const char * const ra_class_name[] = { + "total", + "newfile", + "state", + "context", + "contexta", + "backward", + "onthrash", + "onraseek", + "none", +}; + +static unsigned long ra_events[RA_CLASS_END+1][RA_EVENT_END+1][2]; + +static inline void ra_account(struct file_ra_state *ra, + enum ra_event e, int pages) +{ + enum ra_class c; + + if (e == RA_EVENT_READAHEAD_HIT && pages < 0) { + c = (ra->flags >> RA_CLASS_SHIFT) & RA_CLASS_MASK; + pages = -pages; + } else if (ra) + c = ra->flags & RA_CLASS_MASK; + else + c = RA_CLASS_END; + + if (!c) + c = RA_CLASS_END; + + ra_events[c][e][0] += 1; + ra_events[c][e][1] += pages; + + if (e == RA_EVENT_READAHEAD) + ra_events[c][RA_EVENT_END][1] += pages * pages; +} + +static int ra_events_show(struct seq_file *s, void *_) +{ + int i; + int c; + int e; + static const char event_fmt[] = "%-16s"; + static const char class_fmt[] = "%10s"; + static const char item_fmt[] = "%10lu"; + static const char percent_format[] = "%9lu%%"; + static const char * const table_name[] = { + "[table requests]", + "[table pages]", + "[table summary]"}; + + for (i = 0; i <= 1; i++) { + for (e = 0; e <= RA_EVENT_END; e++) { + ra_events[0][e][i] = 0; + for (c = 1; c < RA_CLASS_END; c++) + ra_events[0][e][i] += ra_events[c][e][i]; + } + + seq_printf(s, event_fmt, table_name[i]); + for (c = 0; c <= RA_CLASS_END; c++) + seq_printf(s, class_fmt, ra_class_name[c]); + seq_puts(s, "\n"); + + for (e = 0; e < RA_EVENT_END; e++) { + if (e == RA_EVENT_READAHEAD_HIT && i == 0) + continue; + if (e == RA_EVENT_IO_BLOCK && i == 1) + continue; + + seq_printf(s, event_fmt, ra_event_name[e]); + for (c = 0; c <= RA_CLASS_END; c++) + seq_printf(s, item_fmt, ra_events[c][e][i]); + seq_puts(s, "\n"); + } + seq_puts(s, "\n"); + } + 
+ seq_printf(s, event_fmt, table_name[2]); + for (c = 0; c <= RA_CLASS_END; c++) + seq_printf(s, class_fmt, ra_class_name[c]); + seq_puts(s, "\n"); + + seq_printf(s, event_fmt, "random_rate"); + for (c = 0; c <= RA_CLASS_END; c++) + seq_printf(s, percent_format, + (ra_events[c][RA_EVENT_READRANDOM][0] * 100) / + ((ra_events[c][RA_EVENT_READRANDOM][0] + + ra_events[c][RA_EVENT_READAHEAD][0]) | 1)); + seq_puts(s, "\n"); + + seq_printf(s, event_fmt, "ra_hit_rate"); + for (c = 0; c <= RA_CLASS_END; c++) + seq_printf(s, percent_format, + (ra_events[c][RA_EVENT_READAHEAD_HIT][1] * 100) / + (ra_events[c][RA_EVENT_READAHEAD][1] | 1)); + seq_puts(s, "\n"); + + seq_printf(s, event_fmt, "la_hit_rate"); + for (c = 0; c <= RA_CLASS_END; c++) + seq_printf(s, percent_format, + (ra_events[c][RA_EVENT_LOOKAHEAD_HIT][0] * 100) / + (ra_events[c][RA_EVENT_LOOKAHEAD][0] | 1)); + seq_puts(s, "\n"); + + seq_printf(s, event_fmt, "var_ra_size"); + for (c = 0; c <= RA_CLASS_END; c++) + seq_printf(s, item_fmt, + (ra_events[c][RA_EVENT_END][1] - + ra_events[c][RA_EVENT_READAHEAD][1] * + (ra_events[c][RA_EVENT_READAHEAD][1] / + (ra_events[c][RA_EVENT_READAHEAD][0] | 1))) / + (ra_events[c][RA_EVENT_READAHEAD][0] | 1)); + seq_puts(s, "\n"); + + seq_printf(s, event_fmt, "avg_ra_size"); + for (c = 0; c <= RA_CLASS_END; c++) + seq_printf(s, item_fmt, + (ra_events[c][RA_EVENT_READAHEAD][1] + + ra_events[c][RA_EVENT_READAHEAD][0] / 2) / + (ra_events[c][RA_EVENT_READAHEAD][0] | 1)); + seq_puts(s, "\n"); + + seq_printf(s, event_fmt, "avg_la_size"); + for (c = 0; c <= RA_CLASS_END; c++) + seq_printf(s, item_fmt, + (ra_events[c][RA_EVENT_LOOKAHEAD][1] + + ra_events[c][RA_EVENT_LOOKAHEAD][0] / 2) / + (ra_events[c][RA_EVENT_LOOKAHEAD][0] | 1)); + seq_puts(s, "\n"); + + return 0; +} + +static int ra_events_open(struct inode *inode, struct file *file) +{ + return single_open(file, ra_events_show, NULL); +} + +static ssize_t ra_events_write(struct file *file, const char __user *buf, + size_t size, loff_t *offset) +{ + memset(ra_events, 0, sizeof(ra_events)); + return 1; +} + +struct file_operations ra_events_fops = { + .owner = THIS_MODULE, + .open = ra_events_open, + .write = ra_events_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +u32 readahead_debug_level = 0; +u32 disable_stateful_method = 0; + +static int __init readahead_init(void) +{ + struct dentry *root; + + root = debugfs_create_dir("readahead", NULL); + + debugfs_create_file("events", 0644, root, NULL, &ra_events_fops); + + debugfs_create_u32("debug_level", 0644, root, &readahead_debug_level); + debugfs_create_bool("disable_stateful_method", 0644, root, + &disable_stateful_method); + + return 0; +} + +module_init(readahead_init) +#else +#define ra_account(ra, e, pages) do { } while (0) +#define readahead_debug_level (0) +#define disable_stateful_method (0) +#endif /* CONFIG_DEBUG_READAHEAD */ + +#define dprintk(args...) \ + do { if (readahead_debug_level >= 1) printk(KERN_DEBUG args); } while(0) +#define ddprintk(args...) 
\ + do { if (readahead_debug_level >= 2) printk(KERN_DEBUG args); } while(0) + void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) { @@ -21,7 +315,7 @@ void default_unplug_io_fn(struct backing EXPORT_SYMBOL(default_unplug_io_fn); struct backing_dev_info default_backing_dev_info = { - .ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE, + .ra_pages = MAX_RA_PAGES, .state = 0, .capabilities = BDI_CAP_MAP_COPY, .unplug_io_fn = default_unplug_io_fn, @@ -49,7 +343,7 @@ static inline unsigned long get_max_read static inline unsigned long get_min_readahead(struct file_ra_state *ra) { - return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE; + return MIN_RA_PAGES; } static inline void ra_off(struct file_ra_state *ra) @@ -134,8 +428,10 @@ int read_cache_pages(struct address_spac continue; } ret = filler(data, page); - if (!pagevec_add(&lru_pvec, page)) + if (!pagevec_add(&lru_pvec, page)) { + cond_resched(); __pagevec_lru_add(&lru_pvec); + } if (ret) { while (!list_empty(pages)) { struct page *victim; @@ -173,8 +469,10 @@ static int read_pages(struct address_spa page->index, GFP_KERNEL)) { ret = mapping->a_ops->readpage(filp, page); if (ret != AOP_TRUNCATED_PAGE) { - if (!pagevec_add(&lru_pvec, page)) + if (!pagevec_add(&lru_pvec, page)) { + cond_resched(); __pagevec_lru_add(&lru_pvec); + } continue; } /* else fall through to release */ } @@ -257,7 +555,8 @@ out: */ static int __do_page_cache_readahead(struct address_space *mapping, struct file *filp, - pgoff_t offset, unsigned long nr_to_read) + pgoff_t offset, unsigned long nr_to_read, + unsigned long lookahead_size) { struct inode *inode = mapping->host; struct page *page; @@ -270,7 +569,7 @@ __do_page_cache_readahead(struct address if (isize == 0) goto out; - end_index = ((isize - 1) >> PAGE_CACHE_SHIFT); + end_index = ((isize - 1) >> PAGE_CACHE_SHIFT); /* * Preallocate as many pages as we will need. 
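The KB() macro at the top of the readahead.c changes converts the VM_MAX_READAHEAD/VM_MIN_READAHEAD kbyte values into page counts, rounding up. A quick self-contained check of the arithmetic, assuming the common PAGE_CACHE_SIZE of 4096 and the adaptive default of 1024 kbytes:

#include <stdio.h>

#define PAGE_CACHE_SIZE	4096UL	/* assumed 4K pages */
#define KB(size)	(((size) * 1024 + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE)

int main(void)
{
	/* 1024 kbytes -> 256 pages, 16 kbytes -> 4 pages */
	printf("MAX_RA_PAGES=%lu MIN_RA_PAGES=%lu\n", KB(1024), KB(16));
	return 0;
}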
@@ -287,12 +586,15 @@ __do_page_cache_readahead(struct address continue; read_unlock_irq(&mapping->tree_lock); + cond_resched(); page = page_cache_alloc_cold(mapping); read_lock_irq(&mapping->tree_lock); if (!page) break; page->index = page_offset; list_add(&page->lru, &page_pool); + if (page_idx == nr_to_read - lookahead_size) + __SetPageReadahead(page); ret++; } read_unlock_irq(&mapping->tree_lock); @@ -329,7 +631,7 @@ int force_page_cache_readahead(struct ad if (this_chunk > nr_to_read) this_chunk = nr_to_read; err = __do_page_cache_readahead(mapping, filp, - offset, this_chunk); + offset, this_chunk, 0); if (err < 0) { ret = err; break; @@ -338,6 +640,9 @@ int force_page_cache_readahead(struct ad offset += this_chunk; nr_to_read -= this_chunk; } + + ra_account(NULL, RA_EVENT_READAHEAD, ret); + return ret; } @@ -373,10 +678,16 @@ static inline int check_ra_success(struc int do_page_cache_readahead(struct address_space *mapping, struct file *filp, pgoff_t offset, unsigned long nr_to_read) { + unsigned long ret; + if (bdi_read_congested(mapping->backing_dev_info)) return -1; - return __do_page_cache_readahead(mapping, filp, offset, nr_to_read); + ret = __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0); + + ra_account(NULL, RA_EVENT_READAHEAD, ret); + + return ret; } /* @@ -396,7 +707,11 @@ blockable_page_cache_readahead(struct ad if (!block && bdi_read_congested(mapping->backing_dev_info)) return 0; - actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read); + actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0); + + ra_account(NULL, RA_EVENT_READAHEAD, actual); + dprintk("blockable-readahead(ino=%lu, ra=%lu+%lu) = %d\n", + mapping->host->i_ino, offset, nr_to_read, actual); return check_ra_success(ra, nr_to_read, actual); } @@ -442,7 +757,7 @@ static int make_ahead_window(struct addr * @req_size: hint: total size of the read which the caller is performing in * PAGE_CACHE_SIZE units * - * page_cache_readahead() is the main function. If performs the adaptive + * page_cache_readahead() is the main function. It performs the adaptive * readahead window size management and submits the readahead I/O. * * Note that @filp is purely used for passing on to the ->readpage[s]() @@ -572,3 +887,1187 @@ unsigned long max_sane_readahead(unsigne __get_zone_counts(&active, &inactive, &free, NODE_DATA(numa_node_id())); return min(nr, (inactive + free) / 2); } + +/* + * Adaptive read-ahead. + * + * Good read patterns are compact both in space and time. The read-ahead logic + * tries to grant larger read-ahead size to better readers under the constraint + * of system memory and load pressure. + * + * It employs two methods to estimate the max thrashing safe read-ahead size: + * 1. state based - the default one + * 2. context based - the failsafe one + * The integration of the dual methods has the merit of being agile and robust. + * It makes the overall design clean: special cases are handled in general by + * the stateless method, leaving the stateful one simple and fast. + * + * To improve throughput and decrease read delay, the logic 'looks ahead'. + * In most read-ahead chunks, one page will be selected and tagged with + * PG_readahead. Later when the page with PG_readahead is read, the logic + * will be notified to submit the next read-ahead chunk in advance. 
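The mark described here is planted by the __do_page_cache_readahead() hunk above: with a chunk of nr_to_read pages and a look-ahead of lookahead_size pages, the page allocated at page_idx == nr_to_read - lookahead_size receives __SetPageReadahead(). A small worked example of where that puts the mark (plain arithmetic, not kernel code):

#include <stdio.h>

/* Zero-based offset, within the chunk, of the PG_readahead mark. */
static unsigned long lookahead_mark_offset(unsigned long nr_to_read,
					   unsigned long lookahead_size)
{
	return nr_to_read - lookahead_size;
}

int main(void)
{
	/* A 64-page chunk with an 8-page look-ahead: the mark goes on the
	 * page at offset 56, i.e. 8 pages before the chunk is exhausted,
	 * which is when the next chunk gets submitted. */
	printf("mark at chunk offset %lu\n", lookahead_mark_offset(64, 8));
	return 0;
}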
+ * + * a read-ahead chunk + * +-----------------------------------------+ + * | # PG_readahead | + * +-----------------------------------------+ + * ^ When this page is read, notify me for the next read-ahead. + * + * + * Here are some variable names used frequently: + * + * |<------- la_size ------>| + * +-----------------------------------------+ + * | # | + * +-----------------------------------------+ + * ra_index -->|<---------------- ra_size -------------->| + * + */ + +#ifdef CONFIG_ADAPTIVE_READAHEAD + +/* + * The nature of read-ahead allows false tests to occur occasionally. + * Here we just do not bother to call get_page(), it's meaningless anyway. + */ +static inline struct page *__find_page(struct address_space *mapping, + pgoff_t offset) +{ + return radix_tree_lookup(&mapping->page_tree, offset); +} + +static inline struct page *find_page(struct address_space *mapping, + pgoff_t offset) +{ + struct page *page; + + read_lock_irq(&mapping->tree_lock); + page = __find_page(mapping, offset); + read_unlock_irq(&mapping->tree_lock); + return page; +} + +/* + * Move pages in danger (of thrashing) to the head of inactive_list. + * Not expected to happen frequently. + */ +static unsigned long rescue_pages(struct page *page, unsigned long nr_pages) +{ + int pgrescue; + pgoff_t index; + struct zone *zone; + struct address_space *mapping; + + BUG_ON(!nr_pages || !page); + pgrescue = 0; + index = page_index(page); + mapping = page_mapping(page); + + dprintk("rescue_pages(ino=%lu, index=%lu nr=%lu)\n", + mapping->host->i_ino, index, nr_pages); + + for(;;) { + zone = page_zone(page); + spin_lock_irq(&zone->lru_lock); + + if (!PageLRU(page)) + goto out_unlock; + + while (page_mapping(page) == mapping && + page_index(page) == index) { + struct page *the_page = page; + page = next_page(page); + if (!PageActive(the_page) && + !PageLocked(the_page) && + page_count(the_page) == 1) { + list_move(&the_page->lru, &zone->inactive_list); + pgrescue++; + } + index++; + if (!--nr_pages) + goto out_unlock; + } + + spin_unlock_irq(&zone->lru_lock); + + cond_resched(); + page = find_page(mapping, index); + if (!page) + goto out; + } +out_unlock: + spin_unlock_irq(&zone->lru_lock); +out: + ra_account(NULL, RA_EVENT_READAHEAD_RESCUE, pgrescue); + return nr_pages; +} + +/* + * Set a new look-ahead mark at @new_index. + * Return 0 if the new mark is successfully set. + */ +static inline int renew_lookahead(struct address_space *mapping, + struct file_ra_state *ra, + pgoff_t index, pgoff_t new_index) +{ + struct page *page; + + if (index == ra->lookahead_index && + new_index >= ra->readahead_index) + return 1; + + page = find_page(mapping, new_index); + if (!page) + return 1; + + __SetPageReadahead(page); + if (ra->lookahead_index == index) + ra->lookahead_index = new_index; + + return 0; +} + +/* + * State based calculation of read-ahead request. + * + * This figure shows the meaning of file_ra_state members: + * + * chunk A chunk B + * +---------------------------+-------------------------------------------+ + * | # | # | + * +---------------------------+-------------------------------------------+ + * ^ ^ ^ ^ + * la_index ra_index lookahead_index readahead_index + */ + +/* + * The node's effective length of inactive_list(s). 
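The "chunk A / chunk B" figure above implies an ordering invariant over the four indices kept in the adaptive half of file_ra_state. A small illustrative check, handy when instrumenting the code (not part of the patch, and only meaningful with CONFIG_ADAPTIVE_READAHEAD):

#include <linux/fs.h>

/* The adaptive window always satisfies
 *	la_index <= ra_index <= lookahead_index <= readahead_index,
 * i.e. trigger position, chunk start, look-ahead mark and chunk end
 * are ordered left to right as in the figure. */
static int ra_indexes_consistent(struct file_ra_state *ra)
{
	return ra->la_index <= ra->ra_index &&
	       ra->ra_index <= ra->lookahead_index &&
	       ra->lookahead_index <= ra->readahead_index;
}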
+ */ +static unsigned long node_free_and_cold_pages(void) +{ + unsigned int i; + unsigned long sum = 0; + struct zone *zones = NODE_DATA(numa_node_id())->node_zones; + + for (i = 0; i < MAX_NR_ZONES; i++) + sum += zones[i].nr_inactive + + zones[i].free_pages - zones[i].pages_low; + + return sum; +} + +/* + * The node's accumulated aging activities. + */ +static unsigned long node_readahead_aging(void) +{ + unsigned long cpu; + unsigned long sum = 0; + cpumask_t mask = node_to_cpumask(numa_node_id()); + + for_each_cpu_mask(cpu, mask) + sum += per_cpu(readahead_aging, cpu); + + return sum; +} + +/* + * The 64bit cache_hits stores three accumulated values and a counter value. + * MSB LSB + * 3333333333333333 : 2222222222222222 : 1111111111111111 : 0000000000000000 + */ +static inline int ra_cache_hit(struct file_ra_state *ra, int nr) +{ + return (ra->cache_hits >> (nr * 16)) & 0xFFFF; +} + +/* + * Conceptual code: + * ra_cache_hit(ra, 1) += ra_cache_hit(ra, 0); + * ra_cache_hit(ra, 0) = 0; + */ +static inline void ra_addup_cache_hit(struct file_ra_state *ra) +{ + int n; + + n = ra_cache_hit(ra, 0); + ra->cache_hits -= n; + n <<= 16; + ra->cache_hits += n; +} + +/* + * The read-ahead is deemed success if cache-hit-rate >= 1/readahead_hit_rate. + */ +static inline int ra_cache_hit_ok(struct file_ra_state *ra) +{ + return ra_cache_hit(ra, 0) * readahead_hit_rate >= + (ra->lookahead_index - ra->la_index); +} + +/* + * Check if @index falls in the @ra request. + */ +static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) +{ + if (index < ra->la_index || index >= ra->readahead_index) + return 0; + + if (index >= ra->ra_index) + return 1; + else + return -1; +} + +/* + * Which method is issuing this read-ahead? + */ +static inline void ra_set_class(struct file_ra_state *ra, + enum ra_class ra_class) +{ + unsigned long flags_mask; + unsigned long flags; + unsigned long old_ra_class; + + flags_mask = ~(RA_CLASS_MASK | (RA_CLASS_MASK << RA_CLASS_SHIFT)); + flags = ra->flags & flags_mask; + + old_ra_class = (ra->flags & RA_CLASS_MASK) << RA_CLASS_SHIFT; + + ra->flags = flags | old_ra_class | ra_class; + + ra_addup_cache_hit(ra); + if (ra_class != RA_CLASS_STATE) + ra->cache_hits <<= 16; + + ra->age = node_readahead_aging(); +} + +/* + * Where is the old read-ahead and look-ahead? + */ +static inline void ra_set_index(struct file_ra_state *ra, + pgoff_t la_index, pgoff_t ra_index) +{ + ra->la_index = la_index; + ra->ra_index = ra_index; +} + +/* + * Where is the new read-ahead and look-ahead? + */ +static inline void ra_set_size(struct file_ra_state *ra, + unsigned long ra_size, unsigned long la_size) +{ + /* Disable look-ahead for loopback file. */ + if (unlikely(ra->flags & RA_FLAG_NO_LOOKAHEAD)) + la_size = 0; + + ra->readahead_index = ra->ra_index + ra_size; + ra->lookahead_index = ra->readahead_index - la_size; +} + +/* + * Submit IO for the read-ahead request in file_ra_state. + */ +static int ra_dispatch(struct file_ra_state *ra, + struct address_space *mapping, struct file *filp) +{ + pgoff_t eof_index; + unsigned long ra_size; + unsigned long la_size; + int actual; + enum ra_class ra_class; + + ra_class = (ra->flags & RA_CLASS_MASK); + BUG_ON(ra_class == 0 || ra_class > RA_CLASS_END); + + eof_index = ((i_size_read(mapping->host) - 1) >> PAGE_CACHE_SHIFT) + 1; + ra_size = ra->readahead_index - ra->ra_index; + la_size = ra->readahead_index - ra->lookahead_index; + + /* Snap to EOF. 
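Coming back to the cache_hits bookkeeping above: the 64-bit field holds four 16-bit counters, slot 0 being the live hit count for the current window and the higher slots accumulated history; ra_addup_cache_hit() folds slot 0 into slot 1, and ra_set_class() additionally shifts the whole history one slot up for non-state requests. A self-contained demo of the packing arithmetic:

#include <stdio.h>
#include <stdint.h>

/* slot 0 = current hits, slots 1..3 = accumulated history */
static unsigned int cache_hit(uint64_t hits, int nr)
{
	return (hits >> (nr * 16)) & 0xFFFF;
}

/* Mirror of ra_addup_cache_hit(): fold slot 0 into slot 1. */
static uint64_t addup_cache_hit(uint64_t hits)
{
	unsigned int n = cache_hit(hits, 0);

	hits -= n;			/* clear slot 0       */
	hits += (uint64_t)n << 16;	/* add it into slot 1 */
	return hits;
}

int main(void)
{
	uint64_t hits = 0;

	hits += 40;			/* 40 hits in this window */
	hits = addup_cache_hit(hits);
	hits += 7;			/* 7 hits in the next one */
	printf("slot0=%u slot1=%u\n", cache_hit(hits, 0), cache_hit(hits, 1));
	/* prints: slot0=7 slot1=40 */
	return 0;
}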
*/ + if (unlikely(ra->ra_index >= eof_index)) + return 0; + if (ra->readahead_index + ra_size / 2 > eof_index) { + if (ra_class == RA_CLASS_CONTEXT_AGGRESSIVE && + eof_index > ra->lookahead_index + 1) + la_size = eof_index - ra->lookahead_index; + else + la_size = 0; + ra_size = eof_index - ra->ra_index; + ra_set_size(ra, ra_size, la_size); + } + + actual = __do_page_cache_readahead(mapping, filp, + ra->ra_index, ra_size, la_size); + +#ifdef CONFIG_DEBUG_READAHEAD + if (ra->flags & RA_FLAG_MMAP) + ra_account(ra, RA_EVENT_READAHEAD_MMAP, actual); + if (ra->readahead_index == eof_index) + ra_account(ra, RA_EVENT_READAHEAD_EOF, actual); + if (la_size) + ra_account(ra, RA_EVENT_LOOKAHEAD, la_size); + if (ra_size > actual) + ra_account(ra, RA_EVENT_IO_CACHE_HIT, ra_size - actual); + ra_account(ra, RA_EVENT_READAHEAD, actual); + + if (!ra->ra_index && filp->f_dentry->d_inode) { + char *fn; + static char path[1024]; + unsigned long size; + + size = (i_size_read(filp->f_dentry->d_inode)+1023)/1024; + fn = d_path(filp->f_dentry, filp->f_vfsmnt, path, 1000); + if (!IS_ERR(fn)) + ddprintk("ino %lu is %s size %luK by %s(%d)\n", + filp->f_dentry->d_inode->i_ino, + fn, size, + current->comm, current->pid); + } + + dprintk("readahead-%s(ino=%lu, index=%lu, ra=%lu+%lu-%lu) = %d\n", + ra_class_name[ra_class], + mapping->host->i_ino, ra->la_index, + ra->ra_index, ra_size, la_size, actual); +#endif /* CONFIG_DEBUG_READAHEAD */ + + return actual; +} + +/* + * Determine the ra request from primitive values. + * + * It applies the following rules: + * - Substract ra_size by the old look-ahead to get real safe read-ahead; + * - Set new la_size according to the (still large) ra_size; + * - Apply upper limits; + * - Make sure stream_shift is not too small. + * (So that the next global_shift will not be too small.) + * + * Input: + * ra_size stores the estimated thrashing-threshold. + * la_size stores the look-ahead size of previous request. + */ +static inline int adjust_rala(unsigned long ra_max, + unsigned long *ra_size, unsigned long *la_size) +{ + unsigned long stream_shift = *la_size; + + if (*ra_size > *la_size) + *ra_size -= *la_size; + else { + ra_account(NULL, RA_EVENT_READAHEAD_SHRINK, *ra_size); + return 0; + } + + *la_size = *ra_size / LOOKAHEAD_RATIO; + + if (*ra_size > ra_max) + *ra_size = ra_max; + if (*la_size > *ra_size) + *la_size = *ra_size; + + stream_shift += (*ra_size - *la_size); + if (stream_shift < *ra_size / 4) + *la_size -= (*ra_size / 4 - stream_shift); + + return 1; +} + +/* + * The function estimates two values: + * 1. thrashing-threshold for the current stream + * It is returned to make the next read-ahead request. + * 2. the remained safe space for the current chunk + * It will be checked to ensure that the current chunk is safe. + * + * The computation will be pretty accurate under heavy load, and will vibrate + * more on light load(with small global_shift), so the grow speed of ra_size + * must be limited, and a moderate large stream_shift must be insured. + * + * This figure illustrates the formula used in the function: + * While the stream reads stream_shift pages inside the chunks, + * the chunks are shifted global_shift pages inside inactive_list. 
+ * + * chunk A chunk B + * |<=============== global_shift ================| + * +-------------+ +-------------------+ | + * | # | | # | inactive_list | + * +-------------+ +-------------------+ head | + * |---->| |---------->| + * | | + * +-- stream_shift --+ + */ +static inline unsigned long compute_thrashing_threshold( + struct file_ra_state *ra, + unsigned long *remain) +{ + unsigned long global_size; + unsigned long global_shift; + unsigned long stream_shift; + unsigned long ra_size; + uint64_t ll; + + global_size = node_free_and_cold_pages(); + global_shift = node_readahead_aging() - ra->age; + global_shift |= 1UL; + stream_shift = ra_cache_hit(ra, 0); + + ll = (uint64_t) stream_shift * (global_size >> 9) * readahead_ratio * 5; + do_div(ll, global_shift); + ra_size = ll; + + if (global_size > global_shift) { + ll = (uint64_t) stream_shift * (global_size - global_shift); + do_div(ll, global_shift); + *remain = ll; + } else + *remain = 0; + + ddprintk("compute_thrashing_threshold: " + "at %lu ra %lu=%lu*%lu/%lu, remain %lu for %lu\n", + ra->readahead_index, ra_size, + stream_shift, global_size, global_shift, + *remain, ra->readahead_index - ra->lookahead_index); + + return ra_size; +} + +/* + * Main function for file_ra_state based read-ahead. + */ +static inline unsigned long +state_based_readahead(struct address_space *mapping, struct file *filp, + struct file_ra_state *ra, + struct page *page, pgoff_t index, + unsigned long ra_size, unsigned long ra_max) +{ + unsigned long ra_old; + unsigned long la_size; + unsigned long remain_space; + unsigned long growth_limit; + + la_size = ra->readahead_index - index; + ra_old = ra->readahead_index - ra->ra_index; + growth_limit = ra_size + ra_max / 16 + + (2 + readahead_ratio / 64) * ra_old; + ra_size = compute_thrashing_threshold(ra, &remain_space); + + if (page && remain_space <= la_size && la_size > 1) { + rescue_pages(page, la_size); + return 0; + } + + if (!adjust_rala(min(ra_max, growth_limit), &ra_size, &la_size)) + return 0; + + ra_set_class(ra, RA_CLASS_STATE); + ra_set_index(ra, index, ra->readahead_index); + ra_set_size(ra, ra_size, la_size); + + return ra_dispatch(ra, mapping, filp); +} + +/* + * Page cache context based estimation of read-ahead/look-ahead size/index. + * + * The logic first looks around to find the start point of next read-ahead, + * and then, if necessary, looks backward in the inactive_list to get an + * estimation of the thrashing-threshold. + * + * The estimation theory can be illustrated with figure: + * + * chunk A chunk B chunk C head + * + * l01 l11 l12 l21 l22 + *| |-->|-->| |------>|-->| |------>| + *| +-------+ +-----------+ +-------------+ | + *| | # | | # | | # | | + *| +-------+ +-----------+ +-------------+ | + *| |<==============|<===========================|<============================| + * L0 L1 L2 + * + * Let f(l) = L be a map from + * l: the number of pages read by the stream + * to + * L: the number of pages pushed into inactive_list in the mean time + * then + * f(l01) <= L0 + * f(l11 + l12) = L1 + * f(l21 + l22) = L2 + * ... + * f(l01 + l11 + ...) <= Sum(L0 + L1 + ...) + * <= Length(inactive_list) = f(thrashing-threshold) + * + * So the count of countinuous history pages left in the inactive_list is always + * a lower estimation of the true thrashing-threshold. 
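Putting numbers to compute_thrashing_threshold(): if the stream got stream_shift pages read while global_shift pages of aging happened on a node holding global_size free-and-cold pages, the estimate scales as stream_shift * global_size / global_shift, derated by readahead_ratio (the ">> 9" combined with the "* 5" divides by 102.4, i.e. approximately by 100 for the percentage). A self-contained rerun of the arithmetic with readahead_ratio at its default of 50:

#include <stdio.h>
#include <stdint.h>

static unsigned long thrashing_threshold(unsigned long stream_shift,
					 unsigned long global_size,
					 unsigned long global_shift,
					 unsigned long readahead_ratio)
{
	/* same arithmetic as compute_thrashing_threshold() above */
	uint64_t ll = (uint64_t)stream_shift * (global_size >> 9) *
		      readahead_ratio * 5;

	return (unsigned long)(ll / (global_shift | 1));
}

int main(void)
{
	/* a reader that consumed 128 pages while 4096 pages aged on a
	 * node with 65536 cold/free pages, at the default 50% ratio */
	printf("ra_size estimate: %lu pages\n",
	       thrashing_threshold(128, 65536, 4096, 50));
	/* prints 999, roughly 128 * (65536 / 4096) * 50% */
	return 0;
}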
+ */ + +#define PAGE_REFCNT_0 0 +#define PAGE_REFCNT_1 (1 << PG_referenced) +#define PAGE_REFCNT_2 (1 << PG_active) +#define PAGE_REFCNT_3 ((1 << PG_active) | (1 << PG_referenced)) +#define PAGE_REFCNT_MASK PAGE_REFCNT_3 + +/* + * STATUS REFERENCE COUNT + * __ 0 + * _R PAGE_REFCNT_1 + * A_ PAGE_REFCNT_2 + * AR PAGE_REFCNT_3 + * + * A/R: Active / Referenced + */ +static inline unsigned long page_refcnt(struct page *page) +{ + return page->flags & PAGE_REFCNT_MASK; +} + +/* + * STATUS REFERENCE COUNT TYPE + * __ 0 fresh + * _R PAGE_REFCNT_1 stale + * A_ PAGE_REFCNT_2 disturbed once + * AR PAGE_REFCNT_3 disturbed twice + * + * A/R: Active / Referenced + */ +static inline unsigned long cold_page_refcnt(struct page *page) +{ + if (!page || PageActive(page)) + return 0; + + return page_refcnt(page); +} + +static inline char page_refcnt_symbol(struct page *page) +{ + if (!page) + return 'X'; + + switch (page_refcnt(page)) { + case 0: + return '_'; + case PAGE_REFCNT_1: + return '-'; + case PAGE_REFCNT_2: + return '='; + case PAGE_REFCNT_3: + return '#'; + default: + return '?'; + } +} + +/* + * Count/estimate cache hits in range [first_index, last_index]. + * The estimation is simple and optimistic. + */ +static int count_cache_hit(struct address_space *mapping, + pgoff_t first_index, pgoff_t last_index) +{ + struct page *page; + int size = last_index - first_index + 1; + int count = 0; + int i; + + cond_resched(); + read_lock_irq(&mapping->tree_lock); + + /* + * The first page may well is chunk head and has been accessed, + * so it is index 0 that makes the estimation optimistic. This + * behavior guarantees a readahead when (size < ra_max) and + * (readahead_hit_rate >= 16). + */ + for (i = 0; i < 16;) { + page = __find_page(mapping, first_index + + size * ((i++ * 29) & 15) / 16); + if (cold_page_refcnt(page) >= PAGE_REFCNT_1 && ++count >= 2) + break; + } + + read_unlock_irq(&mapping->tree_lock); + + return size * count / i; +} + +/* + * Look back and check history pages to estimate thrashing-threshold. + */ +static unsigned long query_page_cache_segment(struct address_space *mapping, + struct file_ra_state *ra, + unsigned long *remain, pgoff_t offset, + unsigned long ra_min, unsigned long ra_max) +{ + pgoff_t index; + unsigned long count; + unsigned long nr_lookback; + struct radix_tree_cache cache; + + /* + * Scan backward and check the near @ra_max pages. + * The count here determines ra_size. + */ + cond_resched(); + read_lock_irq(&mapping->tree_lock); + index = radix_tree_scan_hole_backward(&mapping->page_tree, + offset, ra_max); +#ifdef DEBUG_READAHEAD_RADIXTREE + WARN_ON(index > offset); + if (index != offset) + WARN_ON(!__find_page(mapping, index + 1)); + if (index && offset - index < ra_max) + WARN_ON(__find_page(mapping, index)); +#endif + read_unlock_irq(&mapping->tree_lock); + + *remain = offset - index; + + if (offset == ra->readahead_index && ra_cache_hit_ok(ra)) + count = *remain; + else if (count_cache_hit(mapping, index + 1, offset) * + readahead_hit_rate >= *remain) + count = *remain; + else + count = ra_min; + + /* + * Unnecessary to count more? + */ + if (count < ra_max) + goto out; + + if (unlikely(ra->flags & RA_FLAG_NO_LOOKAHEAD)) + goto out; + + /* + * Check the far pages coarsely. + * The big count here helps increase la_size. 
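+ * (Cost note, derived from the loop that follows: the scan advances ra_max
+ * pages per step and touches only one cached radix tree node per step, so
+ * it costs at most about nr_lookback / ra_max node lookups rather than one
+ * lookup per page.)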
+ */ + nr_lookback = ra_max * (LOOKAHEAD_RATIO + 1) * + 100 / (readahead_ratio + 1); + + cond_resched(); + radix_tree_cache_init(&cache); + read_lock_irq(&mapping->tree_lock); + for (count += ra_max; count < nr_lookback; count += ra_max) { + struct radix_tree_node *node; + node = radix_tree_cache_lookup_node(&mapping->page_tree, + &cache, offset - count, 1); +#ifdef DEBUG_READAHEAD_RADIXTREE + if (node != radix_tree_lookup_node(&mapping->page_tree, + offset - count, 1)) + BUG(); +#endif + if (!node) + break; + } + read_unlock_irq(&mapping->tree_lock); + +out: + /* + * For sequential read that extends from index 0, the counted value + * may well be far under the true threshold, so return it unmodified + * for further process in adjust_rala_aggressive(). + */ + if (count >= offset) + count = offset; + else + count = max(ra_min, count * readahead_ratio / 100); + + ddprintk("query_page_cache_segment: " + "ino=%lu, idx=%lu, count=%lu, remain=%lu\n", + mapping->host->i_ino, offset, count, *remain); + + return count; +} + +/* + * Find past-the-end index of the segment before @index. + */ +static inline pgoff_t find_segtail_backward(struct address_space *mapping, + pgoff_t index, unsigned long max_scan) +{ + struct radix_tree_cache cache; + struct page *page; + pgoff_t origin; + + origin = index; + if (max_scan > index) + max_scan = index; + + cond_resched(); + radix_tree_cache_init(&cache); + read_lock_irq(&mapping->tree_lock); + for (; origin - index < max_scan;) { + page = radix_tree_cache_lookup(&mapping->page_tree, + &cache, --index); + if (page) { + read_unlock_irq(&mapping->tree_lock); + return index + 1; + } + } + read_unlock_irq(&mapping->tree_lock); + + return 0; +} + +/* + * Find past-the-end index of the segment at @index. + */ +static inline pgoff_t find_segtail(struct address_space *mapping, + pgoff_t index, unsigned long max_scan) +{ + pgoff_t ra_index; + + cond_resched(); + read_lock_irq(&mapping->tree_lock); + ra_index = radix_tree_scan_hole(&mapping->page_tree, index, max_scan); +#ifdef DEBUG_READAHEAD_RADIXTREE + BUG_ON(!__find_page(mapping, index)); + WARN_ON(ra_index < index); + if (ra_index != index && !__find_page(mapping, ra_index - 1)) + printk(KERN_ERR "radix_tree_scan_hole(index=%lu ra_index=%lu " + "max_scan=%lu nrpages=%lu) fooled!\n", + index, ra_index, max_scan, mapping->nrpages); + if (ra_index != ~0UL && ra_index - index < max_scan) + WARN_ON(__find_page(mapping, ra_index)); +#endif + read_unlock_irq(&mapping->tree_lock); + + if (ra_index <= index + max_scan) + return ra_index; + else + return 0; +} + +/* + * Determine the request parameters for context based read-ahead that extends + * from start of file. + * + * The major weakness of stateless method is perhaps the slow grow up speed of + * ra_size. The logic tries to make up for this in the important case of + * sequential reads that extend from start of file. In this case, the ra_size + * is not chosen to make the whole next chunk safe (as in normal ones). Only + * half of which is safe. The added 'unsafe' half is the look-ahead part. It + * is expected to be safeguarded by rescue_pages() when the previous chunks are + * lost. 
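+ *
+ * Worked example with illustrative numbers: with readahead_ratio = 50, a
+ * stream that has read pages 0-99 (so ra_size = 100 on entry) and carries
+ * an old look-ahead of la_size = 20 gets
+ *	ra_size = (100 - 20) * 50 / 100 + 100 * 50 / 100 = 90
+ *	la_size = 100 * 50 / 100 = 50
+ * i.e. more than half of the new chunk is look-ahead.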
+ */ +static inline int adjust_rala_aggressive(unsigned long ra_max, + unsigned long *ra_size, unsigned long *la_size) +{ + pgoff_t index = *ra_size; + + *ra_size -= min(*ra_size, *la_size); + *ra_size = *ra_size * readahead_ratio / 100; + *la_size = index * readahead_ratio / 100; + *ra_size += *la_size; + + if (*ra_size > ra_max) + *ra_size = ra_max; + if (*la_size > *ra_size) + *la_size = *ra_size; + + return 1; +} + +/* + * Main function for page context based read-ahead. + */ +static inline int +try_context_based_readahead(struct address_space *mapping, + struct file_ra_state *ra, struct page *prev_page, + struct page *page, pgoff_t index, + unsigned long ra_min, unsigned long ra_max) +{ + pgoff_t ra_index; + unsigned long ra_size; + unsigned long la_size; + unsigned long remain_pages; + + /* Where to start read-ahead? + * NFSv3 daemons may process adjacent requests in parallel, + * leading to many locally disordered, globally sequential reads. + * So do not require nearby history pages to be present or accessed. + */ + if (page) { + ra_index = find_segtail(mapping, index, ra_max * 5 / 4); + if (!ra_index) + return -1; + } else if (prev_page || find_page(mapping, index - 1)) { + ra_index = index; + } else if (readahead_hit_rate > 1) { + ra_index = find_segtail_backward(mapping, index, + readahead_hit_rate + ra_min); + if (!ra_index) + return 0; + ra_min += 2 * (index - ra_index); + index = ra_index; /* pretend the request starts here */ + } else + return 0; + + ra_size = query_page_cache_segment(mapping, ra, &remain_pages, + index, ra_min, ra_max); + + la_size = ra_index - index; + if (page && remain_pages <= la_size && + remain_pages < index && la_size > 1) { + rescue_pages(page, la_size); + return -1; + } + + if (ra_size == index) { + if (!adjust_rala_aggressive(ra_max, &ra_size, &la_size)) + return -1; + ra_set_class(ra, RA_CLASS_CONTEXT_AGGRESSIVE); + } else { + if (!adjust_rala(ra_max, &ra_size, &la_size)) + return -1; + ra_set_class(ra, RA_CLASS_CONTEXT); + } + + ra_set_index(ra, index, ra_index); + ra_set_size(ra, ra_size, la_size); + + return 1; +} + +/* + * Read-ahead on start of file. + * + * The strategies here are most important for small files. + * 1. Set a moderately large read-ahead size; + * 2. Issue the next read-ahead request as soon as possible. + * + * But be careful, there are some applications that dip into only the very head + * of a file. The most important thing is to prevent them from triggering the + * next (much larger) read-ahead request, which leads to lots of cache misses. + * Two pages should be enough for them, correct me if I'm wrong. + */ +static inline unsigned long +newfile_readahead(struct address_space *mapping, + struct file *filp, struct file_ra_state *ra, + unsigned long req_size, unsigned long ra_min) +{ + unsigned long ra_size; + unsigned long la_size; + + if (req_size > ra_min) /* larger value risks thrashing */ + req_size = ra_min; + + if (unlikely(ra->flags & RA_FLAG_NFSD)) { + ra_size = MIN_NFSD_PAGES; + la_size = 0; + } else { + ra_size = 4 * req_size; + la_size = 2 * req_size; + } + + ra_set_class(ra, RA_CLASS_NEWFILE); + ra_set_index(ra, 0, 0); + ra_set_size(ra, ra_size, la_size); + + return ra_dispatch(ra, mapping, filp); +} + +/* + * Backward prefetching. + * No look ahead and thrashing threshold estimation for stepping backward + * pattern: should be unnecessary. 
+ */
+static inline int
+try_read_backward(struct file_ra_state *ra, pgoff_t begin_index,
+			unsigned long ra_size, unsigned long ra_max)
+{
+	pgoff_t end_index;
+
+	/* Are we reading backward? */
+	if (begin_index > ra->prev_page)
+		return 0;
+
+	if ((ra->flags & RA_CLASS_MASK) == RA_CLASS_BACKWARD &&
+					ra_has_index(ra, ra->prev_page)) {
+		ra_size += 2 * ra_cache_hit(ra, 0);
+		end_index = ra->la_index;
+	} else {
+		ra_size += ra_size + ra_size * (readahead_hit_rate - 1) / 2;
+		end_index = ra->prev_page;
+	}
+
+	if (ra_size > ra_max)
+		ra_size = ra_max;
+
+	/* Read traces close enough to be covered by the prefetching? */
+	if (end_index > begin_index + ra_size)
+		return 0;
+
+	begin_index = end_index - ra_size;
+
+	ra_set_class(ra, RA_CLASS_BACKWARD);
+	ra_set_index(ra, begin_index, begin_index);
+	ra_set_size(ra, ra_size, 0);
+
+	return 1;
+}
+
+/*
+ * Readahead thrashing recovery.
+ */
+static inline unsigned long
+thrashing_recovery_readahead(struct address_space *mapping,
+				struct file *filp, struct file_ra_state *ra,
+				pgoff_t index, unsigned long ra_max)
+{
+	unsigned long ra_size;
+
+	if (readahead_debug_level && find_page(mapping, index - 1))
+		ra_account(ra, RA_EVENT_READAHEAD_MUTILATE,
+						ra->readahead_index - index);
+	ra_account(ra, RA_EVENT_READAHEAD_THRASHING,
+						ra->readahead_index - index);
+
+	/*
+	 * Some thrashing occurred in (ra_index, la_index], in which case the
+	 * old read-ahead chunk is lost soon after the new one is allocated.
+	 * Ensure that we recover all needed pages in the old chunk.
+	 */
+	if (index < ra->ra_index)
+		ra_size = ra->ra_index - index;
+	else {
+		/* After thrashing, we know the exact thrashing-threshold. */
+		ra_size = ra_cache_hit(ra, 0);
+
+		/* And we'd better be a bit conservative. */
+		ra_size = ra_size * 3 / 4;
+	}
+
+	if (ra_size > ra_max)
+		ra_size = ra_max;
+
+	ra_set_class(ra, RA_CLASS_THRASHING);
+	ra_set_index(ra, index, index);
+	ra_set_size(ra, ra_size, ra_size / LOOKAHEAD_RATIO);
+
+	return ra_dispatch(ra, mapping, filp);
+}
+
+/*
+ * If there is a previous sequential read, it is likely to be another
+ * sequential read at the new position.
+ * Databases are known to have this seek-and-read-one-block pattern.
+ */
+static inline int
+try_readahead_on_seek(struct file_ra_state *ra, pgoff_t index,
+			unsigned long ra_size, unsigned long ra_max)
+{
+	unsigned long hit0 = ra_cache_hit(ra, 0);
+	unsigned long hit1 = ra_cache_hit(ra, 1) + hit0;
+	unsigned long hit2 = ra_cache_hit(ra, 2);
+	unsigned long hit3 = ra_cache_hit(ra, 3);
+
+	/* There's a previous read-ahead request? */
+	if (!ra_has_index(ra, ra->prev_page))
+		return 0;
+
+	/* The previous read-ahead sequences have similar sizes? */
+	if (!(ra_size < hit1 && hit1 > hit2 / 2 &&
+				hit2 > hit3 / 2 &&
+				hit3 > hit1 / 2))
+		return 0;
+
+	hit1 = max(hit1, hit2);
+
+	/* Follow the same prefetching direction. */
+	if ((ra->flags & RA_CLASS_MASK) == RA_CLASS_BACKWARD)
+		index = ((index > hit1 - ra_size) ? index - hit1 + ra_size : 0);
+
+	ra_size = min(hit1, ra_max);
+
+	ra_set_class(ra, RA_CLASS_SEEK);
+	ra_set_index(ra, index, index);
+	ra_set_size(ra, ra_size, 0);
+
+	return 1;
+}
+
+/*
+ * ra_min is mainly determined by the size of cache memory.
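+ * (In the code below this works out to MIN_RA_PAGES plus roughly 1/8192 of
+ * the pool reported by max_sane_readahead(), clamped to 128KB and to
+ * ra_max / 2.)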
+ * Table of concrete numbers for 4KB page size: + * inactive + free (MB): 4 8 16 32 64 128 256 512 1024 + * ra_min (KB): 16 16 16 16 20 24 32 48 64 + */ +static inline void get_readahead_bounds(struct file_ra_state *ra, + unsigned long *ra_min, + unsigned long *ra_max) +{ + unsigned long pages; + + pages = max_sane_readahead(KB(1024*1024)); + *ra_max = min(min(pages, 0xFFFFUL), ra->ra_pages); + *ra_min = min(min(MIN_RA_PAGES + (pages>>13), KB(128)), *ra_max/2); +} + +/** + * page_cache_readahead_adaptive - adaptive read-ahead main function + * @mapping, @ra, @filp: the same as page_cache_readahead() + * @prev_page: the page at @index-1, may be NULL to let the function find it + * @page: the page at @index, or NULL if non-present + * @begin_index, @index, @end_index: offsets into @mapping + * [@begin_index, @end_index) is the read the caller is performing + * @index indicates the page to be read now + * + * page_cache_readahead_adaptive() is the entry point of the adaptive + * read-ahead logic. It tries a set of methods in turn to determine the + * appropriate readahead action and submits the readahead I/O. + * + * The caller is expected to point ra->prev_page to the previously accessed + * page, and to call it on two conditions: + * 1. @page == NULL + * A cache miss happened, some pages have to be read in + * 2. @page != NULL && PageReadahead(@page) + * A look-ahead mark encountered, this is set by a previous read-ahead + * invocation to instruct the caller to give the function a chance to + * check up and do next read-ahead in advance. + */ +unsigned long +page_cache_readahead_adaptive(struct address_space *mapping, + struct file_ra_state *ra, struct file *filp, + struct page *prev_page, struct page *page, + pgoff_t begin_index, pgoff_t index, pgoff_t end_index) +{ + unsigned long size; + unsigned long ra_min; + unsigned long ra_max; + int ret; + + might_sleep(); + + if (page) { + if(!TestClearPageReadahead(page)) + return 0; + if (bdi_read_congested(mapping->backing_dev_info)) { + ra_account(ra, RA_EVENT_IO_CONGESTION, + end_index - index); + return 0; + } + if (laptop_mode && laptop_spinned_down()) { + if (!renew_lookahead(mapping, ra, index, + index + LAPTOP_POLL_INTERVAL)) + return 0; + } + } + + if (page) + ra_account(ra, RA_EVENT_LOOKAHEAD_HIT, + ra->readahead_index - ra->lookahead_index); + else if (index) + ra_account(ra, RA_EVENT_CACHE_MISS, end_index - begin_index); + + size = end_index - index; + get_readahead_bounds(ra, &ra_min, &ra_max); + + /* readahead disabled? */ + if (unlikely(!ra_max || !readahead_ratio)) { + size = max_sane_readahead(size); + goto readit; + } + + /* + * Start of file. + */ + if (index == 0) + return newfile_readahead(mapping, filp, ra, end_index, ra_min); + + /* + * State based sequential read-ahead. + */ + if (!disable_stateful_method && + index == ra->lookahead_index && ra_cache_hit_ok(ra)) + return state_based_readahead(mapping, filp, ra, page, + index, size, ra_max); + + /* + * Recover from possible thrashing. + */ + if (!page && index == ra->prev_page + 1 && ra_has_index(ra, index)) + return thrashing_recovery_readahead(mapping, filp, ra, + index, ra_max); + + /* + * Backward read-ahead. + */ + if (!page && begin_index == index && + try_read_backward(ra, index, size, ra_max)) + return ra_dispatch(ra, mapping, filp); + + /* + * Context based sequential read-ahead. 
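+ * (try_context_based_readahead() uses a three-way return value here:
+ * 1 means a request was prepared and should be dispatched, 0 means fall
+ * through to the heuristics below, and -1 means give up on this access.)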
+ */ + ret = try_context_based_readahead(mapping, ra, prev_page, page, + index, ra_min, ra_max); + if (ret > 0) + return ra_dispatch(ra, mapping, filp); + if (ret < 0) + return 0; + + /* No action on look ahead time? */ + if (page) { + ra_account(ra, RA_EVENT_LOOKAHEAD_NOACTION, + ra->readahead_index - index); + return 0; + } + + /* + * Random read that follows a sequential one. + */ + if (try_readahead_on_seek(ra, index, size, ra_max)) + return ra_dispatch(ra, mapping, filp); + + /* + * Random read. + */ + if (size > ra_max) + size = ra_max; + +readit: + size = __do_page_cache_readahead(mapping, filp, index, size, 0); + + ra_account(ra, RA_EVENT_READRANDOM, size); + dprintk("readrandom(ino=%lu, pages=%lu, index=%lu-%lu-%lu) = %lu\n", + mapping->host->i_ino, mapping->nrpages, + begin_index, index, end_index, size); + + return size; +} + +/** + * readahead_cache_hit - adaptive read-ahead feedback function + * @ra: file_ra_state which holds the readahead state + * @page: the page just accessed + * + * readahead_cache_hit() is the feedback route of the adaptive read-ahead + * logic. It must be called on every access on the read-ahead pages. + */ +void fastcall readahead_cache_hit(struct file_ra_state *ra, struct page *page) +{ + if (PageActive(page) || PageReferenced(page)) + return; + + if (!PageUptodate(page)) + ra_account(ra, RA_EVENT_IO_BLOCK, 1); + + if (!ra_has_index(ra, page->index)) + return; + + ra->cache_hits++; + + if (page->index >= ra->ra_index) + ra_account(ra, RA_EVENT_READAHEAD_HIT, 1); + else + ra_account(ra, RA_EVENT_READAHEAD_HIT, -1); +} + +#endif /* CONFIG_ADAPTIVE_READAHEAD */ Index: linux-2.6.16-ck1/mm/swap.c =================================================================== --- linux-2.6.16-ck1.orig/mm/swap.c 2006-03-20 20:46:55.000000000 +1100 +++ linux-2.6.16-ck1/mm/swap.c 2006-03-20 20:47:04.000000000 +1100 @@ -128,6 +128,8 @@ void fastcall mark_page_accessed(struct ClearPageReferenced(page); } else if (!PageReferenced(page)) { SetPageReferenced(page); + if (PageLRU(page)) + inc_readahead_aging(); } } Index: linux-2.6.16-ck1/mm/vmscan.c =================================================================== --- linux-2.6.16-ck1.orig/mm/vmscan.c 2006-03-20 20:47:00.000000000 +1100 +++ linux-2.6.16-ck1/mm/vmscan.c 2006-03-20 20:47:04.000000000 +1100 @@ -458,6 +458,9 @@ static int shrink_list(struct list_head if (PageWriteback(page)) goto keep_locked; + if (!PageReferenced(page)) + inc_readahead_aging(); + referenced = page_referenced(page, 1); /* In active use or really unfreeable? Activate it. */ if (referenced && page_mapping_inuse(page))
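
As a rough illustration of the caller contract documented above for
page_cache_readahead_adaptive() and readahead_cache_hit() -- this is only a
sketch, not the actual mm/filemap.c change carried by this patch, and
my_file_read_page() is a made-up helper -- a read path could drive the
adaptive logic like this (page reference counting and locking omitted):

	#include <linux/fs.h>
	#include <linux/mm.h>
	#include <linux/pagemap.h>

	static struct page *my_file_read_page(struct file *filp, pgoff_t index,
					      pgoff_t begin, pgoff_t end)
	{
		struct address_space *mapping = filp->f_mapping;
		struct file_ra_state *ra = &filp->f_ra;
		struct page *page;

		page = find_get_page(mapping, index);

		/*
		 * Condition 1: cache miss, pages have to be read in.
		 * Condition 2: a look-ahead mark left by a previous call.
		 */
		if (!page || PageReadahead(page))
			page_cache_readahead_adaptive(mapping, ra, filp,
					NULL, page, begin, index, end);

		if (!page)
			page = find_get_page(mapping, index); /* may still fail */

		if (page)
			/* feedback: called on every access to readahead pages */
			readahead_cache_hit(ra, page);

		/* this index becomes the "previous page" of the next call */
		ra->prev_page = index;

		return page;
	}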