Contents of /alx-src/tags/kernel26-2.6.12-alx-r9/fs/mbcache.c
Revision 630
Wed Mar 4 11:03:09 2009 UTC (15 years, 6 months ago) by niro
File MIME type: text/plain
File size: 18734 byte(s)
Tag kernel26-2.6.12-alx-r9
/*
 * linux/fs/mbcache.c
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */
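
/*
 * Overall caller flow (an illustrative sketch, not taken from a real
 * caller; error handling omitted):
 *
 *	cache = mb_cache_create(...);               (once, e.g. module init)
 *	ce = mb_cache_entry_alloc(cache);           (returns a held handle)
 *	mb_cache_entry_insert(ce, bdev, block, keys);
 *	mb_cache_entry_release(ce);                 (entry goes on the lru list)
 *	...
 *	ce = mb_cache_entry_get(cache, bdev, block);    (exclusive handle)
 *	mb_cache_entry_free(ce);                    (invalidate and release)
 *	...
 *	mb_cache_shrink(cache, bdev);               (e.g. at unmount)
 *	mb_cache_destroy(cache);                    (e.g. module exit)
 */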

#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>


#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
		printk(KERN_DEBUG f); \
		printk("\n"); \
	} while (0)
#define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while(0)
#else
# define mb_debug(f...) do { } while(0)
# define mb_assert(c) do { } while(0)
#endif
#define mb_error(f...) do { \
		printk(KERN_ERR f); \
		printk("\n"); \
	} while(0)

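/*
 * e_used encodes how an entry is held: each reader handle adds 1, and a
 * writer handle additionally adds MB_CACHE_WRITER, so e_used >=
 * MB_CACHE_WRITER means that a writer currently holds the entry (see
 * mb_cache_entry_get() and __mb_cache_entry_find() below).
 */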
#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)

static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif

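/*
 * Per-cache descriptor.  c_indexes_hash is a zero-length array; the hash
 * table pointers for the additional indexes are allocated together with
 * the struct itself in mb_cache_create().
 */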
struct mb_cache {
	struct list_head	c_cache_list;
	const char		*c_name;
	struct mb_cache_op	c_op;
	atomic_t		c_entry_count;
	int			c_bucket_bits;
#ifndef MB_CACHE_INDEXES_COUNT
	int			c_indexes_count;
#endif
	kmem_cache_t		*c_entry_cache;
	struct list_head	*c_block_hash;
	struct list_head	*c_indexes_hash[0];
};


/*
 * Global data: list of all mbcache's, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);
static struct shrinker *mb_shrinker;

static inline int
mb_cache_indexes(struct mb_cache *cache)
{
#ifdef MB_CACHE_INDEXES_COUNT
	return MB_CACHE_INDEXES_COUNT;
#else
	return cache->c_indexes_count;
#endif
}

/*
 * The callback that the mbcache registers with the kernel so that it
 * can be shrunk dynamically under memory pressure.
 */

static int mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask);


static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
	return !list_empty(&ce->e_block_list);
}


static inline void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
	int n;

	if (__mb_cache_entry_is_hashed(ce)) {
		list_del_init(&ce->e_block_list);
		for (n=0; n<mb_cache_indexes(ce->e_cache); n++)
			list_del(&ce->e_indexes[n].o_list);
	}
}


static inline void
__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!(ce->e_used || ce->e_queued));
	if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
		/* free failed -- put back on the lru list
		   for freeing later. */
		spin_lock(&mb_cache_spinlock);
		list_add(&ce->e_lru_list, &mb_cache_lru_list);
		spin_unlock(&mb_cache_spinlock);
	} else {
		kmem_cache_free(cache->c_entry_cache, ce);
		atomic_dec(&cache->c_entry_count);
	}
}


static inline void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
{
	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued)) {
		if (!__mb_cache_entry_is_hashed(ce))
			goto forget;
		mb_assert(list_empty(&ce->e_lru_list));
		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
	}
	spin_unlock(&mb_cache_spinlock);
	return;
forget:
	spin_unlock(&mb_cache_spinlock);
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}


/*
 * mb_cache_shrink_fn()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @nr_to_scan: Number of objects to scan
 * @gfp_mask: passed on to __mb_cache_entry_forget() when freeing entries
 *
 * Returns the number of objects currently in the cache, scaled by
 * sysctl_vfs_cache_pressure / 100.
 */
static int
mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;
	int count = 0;

	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &mb_cache_list) {
		struct mb_cache *cache =
			list_entry(l, struct mb_cache, c_cache_list);
		mb_debug("cache %s (%d)", cache->c_name,
			 atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	}
	mb_debug("trying to free %d entries", nr_to_scan);
	if (nr_to_scan == 0) {
		spin_unlock(&mb_cache_spinlock);
		goto out;
	}
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_move_tail(&ce->e_lru_list, &free_list);
		__mb_cache_entry_unhash(ce);
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), gfp_mask);
	}
out:
	return (count / 100) * sysctl_vfs_cache_pressure;
}


/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are of equal size. Cache entries may be from
 * multiple devices. (Registration with kernel memory management happens
 * once, at module init time; see init_mbcache() below.) Returns NULL if
 * not enough memory was available.
 *
 * @name: name of the cache (informal)
 * @cache_op: contains the callback called when freeing a cache entry
 * @entry_size: The size of a cache entry, including
 *              struct mb_cache_entry
 * @indexes_count: number of additional indexes in the cache. Must equal
 *                 MB_CACHE_INDEXES_COUNT if the number of indexes is
 *                 hardwired.
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, struct mb_cache_op *cache_op,
		size_t entry_size, int indexes_count, int bucket_bits)
{
	int m=0, n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	if (entry_size < sizeof(struct mb_cache_entry) +
	    indexes_count * sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]))
		return NULL;

	cache = kmalloc(sizeof(struct mb_cache) +
			indexes_count * sizeof(struct list_head), GFP_KERNEL);
	if (!cache)
		goto fail;
	cache->c_name = name;
	cache->c_op.free = NULL;
	if (cache_op)
		cache->c_op.free = cache_op->free;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
#ifdef MB_CACHE_INDEXES_COUNT
	mb_assert(indexes_count == MB_CACHE_INDEXES_COUNT);
#else
	cache->c_indexes_count = indexes_count;
#endif
	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n=0; n<bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_block_hash[n]);
	for (m=0; m<indexes_count; m++) {
		cache->c_indexes_hash[m] = kmalloc(bucket_count *
						   sizeof(struct list_head),
						   GFP_KERNEL);
		if (!cache->c_indexes_hash[m])
			goto fail;
		for (n=0; n<bucket_count; n++)
			INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
	}
	cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
		SLAB_RECLAIM_ACCOUNT, NULL, NULL);
	if (!cache->c_entry_cache)
		goto fail;

	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail:
	if (cache) {
		while (--m >= 0)
			kfree(cache->c_indexes_hash[m]);
		if (cache->c_block_hash)
			kfree(cache->c_block_hash);
		kfree(cache);
	}
	return NULL;
}
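
/*
 * Example call (an illustrative sketch, not from a real caller): a cache
 * with one additional index and 2^6 == 64 buckets per hash table. The
 * entry size must cover the base entry plus one index slot, matching the
 * size check at the top of mb_cache_create():
 *
 *	cache = mb_cache_create("example", NULL,
 *			sizeof(struct mb_cache_entry) +
 *			sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]),
 *			1, 6);
 */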


/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. All cache entries
 * currently in use cannot be freed, and thus remain in the cache. All others
 * are freed.
 *
 * @cache: which cache to shrink
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct mb_cache *cache, struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}
}


/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it. (The shrinker callback itself is only unregistered
 * from kernel memory management at module exit time; see exit_mbcache().)
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;
	int n;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_cache == cache) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}

	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			 cache->c_name,
			 atomic_read(&cache->c_entry_count));
	}

	kmem_cache_destroy(cache->c_entry_cache);

	for (n=0; n < mb_cache_indexes(cache); n++)
		kfree(cache->c_indexes_hash[n]);
	kfree(cache->c_block_hash);
	kfree(cache);
}


/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache)
{
	struct mb_cache_entry *ce;

	ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL);
	if (ce) {
		/* Only count the entry once allocation has succeeded, so
		   that a failed allocation cannot leak c_entry_count. */
		atomic_inc(&cache->c_entry_count);
		INIT_LIST_HEAD(&ce->e_lru_list);
		INIT_LIST_HEAD(&ce->e_block_list);
		ce->e_cache = cache;
		ce->e_used = 1 + MB_CACHE_WRITER;
		ce->e_queued = 0;
	}
	return ce;
}


/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block number exists
 * already (this may happen after a failed lookup, but when another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @keys: array of additional keys. There must be indexes_count entries
 *        in the array (as specified when creating the cache).
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int keys[])
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct list_head *l;
	int error = -EBUSY, n;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block)
			goto out;
	}
	__mb_cache_entry_unhash(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
	for (n=0; n<mb_cache_indexes(cache); n++) {
		ce->e_indexes[n].o_key = keys[n];
		bucket = hash_long(keys[n], cache->c_bucket_bits);
		list_add(&ce->e_indexes[n].o_list,
			 &cache->c_indexes_hash[n][bucket]);
	}
	error = 0;
out:
	spin_unlock(&mb_cache_spinlock);
	return error;
}
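
/*
 * Typical insert pattern (an illustrative sketch, not from a real caller):
 *
 *	ce = mb_cache_entry_alloc(cache);
 *	if (ce) {
 *		unsigned int keys[1] = { key };
 *		int error = mb_cache_entry_insert(ce, bdev, block, keys);
 *		if (error)
 *			mb_cache_entry_free(ce);     (-EBUSY: lost the race)
 *		else
 *			mb_cache_entry_release(ce);
 *	}
 */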


/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	__mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_free()
 *
 * Invalidates (unhashes) a cache entry and then releases the handle to it,
 * so that the entry is freed once its last handle is dropped.
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_unhash(ce);
	__mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists. The returned cache entry is locked for exclusive access ("single
 * writer").
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct list_head *l;
	struct mb_cache_entry *ce;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &cache->c_block_hash[bucket]) {
		ce = list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			while (ce->e_used > 0) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);
			ce->e_used += 1 + MB_CACHE_WRITER;

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				return NULL;
			}
			goto cleanup;
		}
	}
	ce = NULL;

cleanup:
	spin_unlock(&mb_cache_spinlock);
	return ce;
}
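
/*
 * Illustrative use (a sketch): invalidating the cache entry for a block
 * that the filesystem is about to free or reuse:
 *
 *	ce = mb_cache_entry_get(cache, bdev, block);
 *	if (ce)
 *		mb_cache_entry_free(ce);
 */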

#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
		      int index, struct block_device *bdev, unsigned int key)
{
	while (l != head) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry,
				   e_indexes[index].o_list);
		if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			/* Incrementing before holding the lock gives readers
			   priority over writers. */
			ce->e_used++;
			while (ce->e_used >= MB_CACHE_WRITER) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				spin_lock(&mb_cache_spinlock);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	return NULL;
}
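
/*
 * Note: an entry found here may be invalidated while we sleep waiting for
 * a writer above; __mb_cache_entry_find() then returns ERR_PTR(-EAGAIN),
 * which mb_cache_entry_find_first() and mb_cache_entry_find_next() pass
 * through to their callers, so results should be checked with IS_ERR().
 */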


/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found. The
 * returned cache entry is locked for shared access ("multiple readers").
 *
 * @cache: the cache to search
 * @index: the number of the additional index to search (0<=index<indexes_count)
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, int index,
			  struct block_device *bdev, unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	mb_assert(index < mb_cache_indexes(cache));
	spin_lock(&mb_cache_spinlock);
	l = cache->c_indexes_hash[index][bucket].next;
	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
				   index, bdev, key);
	spin_unlock(&mb_cache_spinlock);
	return ce;
}


/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 * 	...
 * 	entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @index: the number of the additional index to search (0<=index<indexes_count)
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	mb_assert(index < mb_cache_indexes(cache));
	spin_lock(&mb_cache_spinlock);
	l = prev->e_indexes[index].o_list.next;
	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
				   index, bdev, key);
	__mb_cache_entry_release_unlock(prev);
	return ce;
}

#endif	/* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */

static int __init init_mbcache(void)
{
	mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
	return 0;
}

static void __exit exit_mbcache(void)
{
	remove_shrinker(mb_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)