Contents of /alx-src/tags/kernel26-2.6.12-alx-r9/mm/filemap.c
Revision 630
Wed Mar 4 11:03:09 2009 UTC (15 years, 6 months ago) by niro
File MIME type: text/plain
File size: 59319 byte(s)
Tag kernel26-2.6.12-alx-r9
/*
 * linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999 Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/aio.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for generic_osync_inode */

#include <asm/uaccess.h>
#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995 Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_lock			(vmtruncate)
 *    ->private_lock			(__free_pte->__set_page_dirty_buffers)
 *      ->swap_list_lock
 *        ->swap_device_lock		(exclusive_swap_page, others)
 *          ->mapping->tree_lock
 *
 *  ->i_sem
 *    ->i_mmap_lock			(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_lock
 *      ->page_table_lock		(various places, mainly in mmap.c)
 *        ->mapping->tree_lock		(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page			(access_process_vm)
 *
 *  ->mmap_sem
 *    ->i_sem				(msync)
 *
 *  ->i_sem
 *    ->i_alloc_sem			(various)
 *
 *  ->inode_lock
 *    ->sb_lock				(fs/fs-writeback.c)
 *    ->mapping->tree_lock		(__sync_single_inode)
 *
 *  ->i_mmap_lock
 *    ->anon_vma.lock			(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock			(anon_vma_prepare and various)
 *
 *  ->page_table_lock
 *    ->swap_device_lock		(try_to_unmap_one)
 *    ->private_lock			(try_to_unmap_one)
 *    ->tree_lock			(try_to_unmap_one)
 *    ->zone.lru_lock			(follow_page->mark_page_accessed)
 *    ->private_lock			(page_remove_rmap->set_page_dirty)
 *    ->tree_lock			(page_remove_rmap->set_page_dirty)
 *    ->inode_lock			(page_remove_rmap->set_page_dirty)
 *    ->inode_lock			(zap_pte_range->set_page_dirty)
 *    ->private_lock			(zap_pte_range->__set_page_dirty_buffers)
 *
 *  ->task->proc_lock
 *    ->dcache_lock			(proc_pid_lookup)
 */

/*
 * Remove a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe. The caller must hold a write_lock on the mapping's tree_lock.
 */
void __remove_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	mapping->nrpages--;
	pagecache_acct(-1);
}

void remove_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	BUG_ON(!PageLocked(page));

	write_lock_irq(&mapping->tree_lock);
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
}
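/*
 * Illustrative sketch (an addition, not in the original file): a
 * truncate-style caller is expected to lock the page first and drop
 * the cache's reference afterwards, roughly:
 *
 *	lock_page(page);
 *	if (page->mapping)		-- not already removed by a racer
 *		remove_from_page_cache(page);
 *	unlock_page(page);
 *	page_cache_release(page);	-- reference the cache held
 */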

static int sync_page(void *word)
{
	struct address_space *mapping;
	struct page *page;

	page = container_of((page_flags_t *)word, struct page, flags);

	/*
	 * page_mapping() is being called without PG_locked held.
	 * Some knowledge of the state and use of the page is used to
	 * reduce the requirements down to a memory barrier.
	 * The danger here is of a stale page_mapping() return value
	 * indicating a struct address_space different from the one it's
	 * associated with when it is associated with one.
	 * After smp_mb(), it's either the correct page_mapping() for
	 * the page, or an old page_mapping() and the page's own
	 * page_mapping() has gone NULL.
	 * The ->sync_page() address_space operation must tolerate
	 * page_mapping() going NULL. By an amazing coincidence,
	 * this comes about because none of the users of the page
	 * in the ->sync_page() methods make essential use of the
	 * page_mapping(), merely passing the page down to the backing
	 * device's unplug functions when it's non-NULL, which in turn
	 * ignore it for all cases but swap, where only page->private is
	 * of interest. When page_mapping() does go NULL, the entire
	 * call stack gracefully ignores the page and returns.
	 * -- wli
	 */
	smp_mb();
	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
		mapping->a_ops->sync_page(page);
	io_schedule();
	return 0;
}

/**
 * filemap_fdatawrite_range - start writeback against all of a mapping's
 * dirty pages that lie within the byte offsets <start, end>
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends
 * @sync_mode:	enable synchronous operation
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback. The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
static int __filemap_fdatawrite_range(struct address_space *mapping,
	loff_t start, loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = mapping->nrpages * 2,
		.start = start,
		.end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

static int filemap_fdatawrite_range(struct address_space *mapping,
	loff_t start, loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}

/*
 * This is a mostly non-blocking flush. Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);
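/*
 * Illustrative sketch (an addition, not in the original file): a caller
 * that merely wants I/O going can use the non-blocking flavour; a
 * data-integrity path must use the WB_SYNC_ALL flavour and then wait:
 *
 *	filemap_flush(mapping);			-- opportunistic
 *
 *	err = filemap_fdatawrite(mapping);	-- integrity: write...
 *	if (!err)
 *		err = filemap_fdatawait(mapping); -- ...then wait (below)
 */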

/*
 * Wait for writeback to complete against pages indexed by start->end
 * inclusive
 */
static int wait_on_page_writeback_range(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;
	pgoff_t index;

	if (end < start)
		return 0;

	pagevec_init(&pvec, 0);
	index = start;
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}

/*
 * Write and wait upon all the pages in the passed range. This is a "data
 * integrity" operation. It waits upon in-flight writeout before starting and
 * waiting upon new writeout. If there was an IO error, return it.
 *
 * We need to re-take i_sem during the generic_osync_inode list walk because
 * it is otherwise livelockable.
 */
int sync_page_range(struct inode *inode, struct address_space *mapping,
			loff_t pos, size_t count)
{
	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	int ret;

	if (!mapping_cap_writeback_dirty(mapping) || !count)
		return 0;
	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (ret == 0) {
		down(&inode->i_sem);
		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
		up(&inode->i_sem);
	}
	if (ret == 0)
		ret = wait_on_page_writeback_range(mapping, start, end);
	return ret;
}
EXPORT_SYMBOL(sync_page_range);

/*
 * Note: Holding i_sem across sync_page_range_nolock is not a good idea
 * as it forces O_SYNC writers to different parts of the same file
 * to be serialised right until io completion.
 */
int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
			loff_t pos, size_t count)
{
	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	int ret;

	if (!mapping_cap_writeback_dirty(mapping) || !count)
		return 0;
	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (ret == 0)
		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
	if (ret == 0)
		ret = wait_on_page_writeback_range(mapping, start, end);
	return ret;
}
EXPORT_SYMBOL(sync_page_range_nolock);

/**
 * filemap_fdatawait - walk the list of under-writeback pages of the given
 * address space and wait for all of them.
 *
 * @mapping: address space structure to wait for
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return wait_on_page_writeback_range(mapping, 0,
				(i_size - 1) >> PAGE_CACHE_SHIFT);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int retval = 0;

	if (mapping->nrpages) {
		retval = filemap_fdatawrite(mapping);
		if (retval == 0)
			retval = filemap_fdatawait(mapping);
	}
	return retval;
}
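/*
 * Illustrative sketch (an addition, not in the original file): an
 * O_SYNC write path would typically follow a successful buffered write
 * with a ranged sync of just the bytes written:
 *
 *	written = ...;			-- bytes written at 'pos'
 *	if (written > 0)
 *		err = sync_page_range(inode, mapping, pos, written);
 */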

int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int retval = 0;

	if (mapping->nrpages) {
		retval = __filemap_fdatawrite_range(mapping, lstart, lend,
						WB_SYNC_ALL);
		if (retval == 0)
			retval = wait_on_page_writeback_range(mapping,
						lstart >> PAGE_CACHE_SHIFT,
						lend >> PAGE_CACHE_SHIFT);
	}
	return retval;
}

/*
 * This function is used to add newly allocated pagecache pages:
 * the page is new, so we can just run SetPageLocked() against it.
 * The other page state flags were set by rmqueue().
 *
 * This function does not add the page to the LRU. The caller must do that.
 */
int add_to_page_cache(struct page *page, struct address_space *mapping,
		pgoff_t offset, int gfp_mask)
{
	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);

	if (error == 0) {
		write_lock_irq(&mapping->tree_lock);
		error = radix_tree_insert(&mapping->page_tree, offset, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			page->mapping = mapping;
			page->index = offset;
			mapping->nrpages++;
			pagecache_acct(1);
		}
		write_unlock_irq(&mapping->tree_lock);
		radix_tree_preload_end();
	}
	return error;
}

EXPORT_SYMBOL(add_to_page_cache);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, int gfp_mask)
{
	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
	if (ret == 0)
		lru_cache_add(page);
	return ret;
}
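/*
 * Illustrative sketch (an addition, not in the original file): the
 * usual pattern for populating the cache - allocate, try to insert,
 * and treat -EEXIST as "somebody else got there first":
 *
 *	page = page_cache_alloc_cold(mapping);
 *	err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *	if (err) {
 *		page_cache_release(page);
 *		if (err == -EEXIST)
 *			... retry the lookup ...
 *	}
 */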

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void fastcall wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);
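/*
 * Illustrative sketch (an addition, not in the original file): the
 * familiar page-flag waiters are thin wrappers over this; for example
 * wait_on_page_locked(page) is essentially
 *
 *	if (PageLocked(page))
 *		wait_on_page_bit(page, PG_locked);
 *
 * and likewise for PG_writeback.
 */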

/**
 * unlock_page() - unlock a locked page
 *
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The first mb is necessary to safely close the critical section opened by the
 * TestSetPageLocked(), the second mb is necessary to enforce ordering between
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page_locked()).
 */
void fastcall unlock_page(struct page *page)
{
	smp_mb__before_clear_bit();
	if (!TestClearPageLocked(page))
		BUG();
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/*
 * End writeback against a page.
 */
void end_page_writeback(struct page *page)
{
	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
		if (!test_clear_page_writeback(page))
			BUG();
	}
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * Get a lock on the page, assuming we need to sleep to get it.
 *
 * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some
 * random driver's requestfn sets TASK_RUNNING, we could busywait. However
 * chances are that on the second loop, the block layer's plug list is empty,
 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
 */
void fastcall __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

/*
 * a rather lightweight function, finding and getting a reference to a
 * hashed page atomically.
 */
struct page * find_get_page(struct address_space *mapping, unsigned long offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page)
		page_cache_get(page);
	read_unlock_irq(&mapping->tree_lock);
	return page;
}

EXPORT_SYMBOL(find_get_page);
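/*
 * Illustrative sketch (an addition, not in the original file): callers
 * own the reference that find_get_page() takes and must drop it:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		... use the (unlocked) page ...
 *		page_cache_release(page);
 *	}
 */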

/*
 * Same as above, but trylock it instead of incrementing the count.
 */
struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page && TestSetPageLocked(page))
		page = NULL;
	read_unlock_irq(&mapping->tree_lock);
	return page;
}

EXPORT_SYMBOL(find_trylock_page);

/**
 * find_lock_page - locate, pin and lock a pagecache page
 *
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns NULL if the page was not present. find_lock_page() may sleep.
 */
struct page *find_lock_page(struct address_space *mapping,
				unsigned long offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
repeat:
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page) {
		page_cache_get(page);
		if (TestSetPageLocked(page)) {
			read_unlock_irq(&mapping->tree_lock);
			lock_page(page);
			read_lock_irq(&mapping->tree_lock);

			/* Has the page been truncated while we slept? */
			if (page->mapping != mapping || page->index != offset) {
				unlock_page(page);
				page_cache_release(page);
				goto repeat;
			}
		}
	}
	read_unlock_irq(&mapping->tree_lock);
	return page;
}

EXPORT_SYMBOL(find_lock_page);

/**
 * find_or_create_page - locate or add a pagecache page
 *
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Locates a page in the pagecache. If the page is not present, a new page
 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 * LRU list. The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or NULL on
 * memory exhaustion.
 */
struct page *find_or_create_page(struct address_space *mapping,
		unsigned long index, unsigned int gfp_mask)
{
	struct page *page, *cached_page = NULL;
	int err;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		if (!cached_page) {
			cached_page = alloc_page(gfp_mask);
			if (!cached_page)
				return NULL;
		}
		err = add_to_page_cache_lru(cached_page, mapping,
					index, gfp_mask);
		if (!err) {
			page = cached_page;
			cached_page = NULL;
		} else if (err == -EEXIST)
			goto repeat;
	}
	if (cached_page)
		page_cache_release(cached_page);
	return page;
}

EXPORT_SYMBOL(find_or_create_page);
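/*
 * Illustrative sketch (an addition, not in the original file): a
 * filesystem grabbing a page it intends to write to would do roughly
 *
 *	page = find_or_create_page(mapping, index,
 *				   mapping_gfp_mask(mapping));
 *	if (page) {
 *		... page is locked and referenced here ...
 *		unlock_page(page);
 *		page_cache_release(page);
 *	}
 */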

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping. The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes. There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup(&mapping->page_tree,
				(void **)pages, start, nr_pages);
	for (i = 0; i < ret; i++)
		page_cache_get(pages[i]);
	read_unlock_irq(&mapping->tree_lock);
	return ret;
}
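/*
 * Illustrative sketch (an addition, not in the original file): gang
 * lookups are normally driven through a pagevec, much as
 * wait_on_page_writeback_range() above does for the tagged variant:
 *
 *	while ((nr = pagevec_lookup(&pvec, mapping, start,
 *				    PAGEVEC_SIZE))) {
 *		... process pvec.pages[0..nr-1] ...
 *		start = pvec.pages[nr - 1]->index + 1;
 *		pagevec_release(&pvec);		-- drops the references
 *	}
 */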

/*
 * Like find_get_pages, except we only return pages which are tagged with
 * `tag'. We update *index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
				(void **)pages, *index, nr_pages, tag);
	for (i = 0; i < ret; i++)
		page_cache_get(pages[i]);
	if (ret)
		*index = pages[ret - 1]->index + 1;
	read_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Same as grab_cache_page, but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed. This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
struct page *
grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
{
	struct page *page = find_get_page(mapping, index);
	unsigned int gfp_mask;

	if (page) {
		if (!TestSetPageLocked(page))
			return page;
		page_cache_release(page);
		return NULL;
	}
	gfp_mask = mapping_gfp_mask(mapping) & ~__GFP_FS;
	page = alloc_pages(gfp_mask, 0);
	if (page && add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
		page_cache_release(page);
		page = NULL;
	}
	return page;
}

EXPORT_SYMBOL(grab_cache_page_nowait);

/*
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level
 * stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 *
 * Note the struct file* is only passed for the use of readpage.  It may be
 * NULL.
 */
void do_generic_mapping_read(struct address_space *mapping,
			     struct file_ra_state *_ra,
			     struct file *filp,
			     loff_t *ppos,
			     read_descriptor_t *desc,
			     read_actor_t actor)
{
	struct inode *inode = mapping->host;
	unsigned long index;
	unsigned long end_index;
	unsigned long offset;
	unsigned long last_index;
	unsigned long next_index;
	unsigned long prev_index;
	loff_t isize;
	struct page *cached_page;
	int error;
	struct file_ra_state ra = *_ra;

	cached_page = NULL;
	index = *ppos >> PAGE_CACHE_SHIFT;
	next_index = index;
	prev_index = ra.prev_page;
	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	for (;;) {
		struct page *page;
		unsigned long nr, ret;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				goto out;
			}
		}
		nr = nr - offset;

		cond_resched();
		if (index == next_index)
			next_index = page_cache_readahead(mapping, &ra, filp,
					index, last_index - index);

find_page:
		page = find_get_page(mapping, index);
		if (unlikely(page == NULL)) {
			handle_ra_miss(mapping, &ra, index);
			goto no_cached_page;
		}
		if (!PageUptodate(page))
			goto page_not_up_to_date;
page_ok:

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When (part of) the same page is read multiple times
		 * in succession, only mark it as accessed the first time.
		 */
		if (prev_index != index)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret == nr && desc->count)
			continue;
		goto out;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		lock_page(page);

		/* Did it get unhashed before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error))
			goto readpage_error;

		if (!PageUptodate(page)) {
			lock_page(page);
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_inode_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		/*
		 * i_size must be checked after we have done ->readpage.
		 *
		 * Checking i_size after the readpage allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */
		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;
		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		desc->error = error;
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		if (!cached_page) {
			cached_page = page_cache_alloc_cold(mapping);
			if (!cached_page) {
				desc->error = -ENOMEM;
				goto out;
			}
		}
		error = add_to_page_cache_lru(cached_page, mapping,
						index, GFP_KERNEL);
		if (error) {
			if (error == -EEXIST)
				goto find_page;
			desc->error = error;
			goto out;
		}
		page = cached_page;
		cached_page = NULL;
		goto readpage;
	}

out:
	*_ra = ra;

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	if (cached_page)
		page_cache_release(cached_page);
	if (filp)
		file_accessed(filp);
}

EXPORT_SYMBOL(do_generic_mapping_read);

int file_read_actor(read_descriptor_t *desc, struct page *page,
			unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long left, count = desc->count;

	if (size > count)
		size = count;

	/*
	 * Faults on the destination of a read are common, so do it before
	 * taking the kmap.
	 */
	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
		kaddr = kmap_atomic(page, KM_USER0);
		left = __copy_to_user_inatomic(desc->arg.buf,
						kaddr + offset, size);
		kunmap_atomic(kaddr, KM_USER0);
		if (left == 0)
			goto success;
	}

	/* Do it the slow way */
	kaddr = kmap(page);
	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
	kunmap(page);

	if (left) {
		size -= left;
		desc->error = -EFAULT;
	}
success:
	desc->count = count - size;
	desc->written += size;
	desc->arg.buf += size;
	return size;
}

/*
 * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
__generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t *ppos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;

	count = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		count += iv->iov_len;
		if (unlikely((ssize_t)(count|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		nr_segs = seg;
		count -= iv->iov_len;	/* This segment is no good */
		break;
	}

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (filp->f_flags & O_DIRECT) {
		loff_t pos = *ppos, size;
		struct address_space *mapping;
		struct inode *inode;

		mapping = filp->f_mapping;
		inode = mapping->host;
		retval = 0;
		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		if (pos < size) {
			retval = generic_file_direct_IO(READ, iocb,
						iov, pos, nr_segs);
			if (retval > 0 && !is_sync_kiocb(iocb))
				retval = -EIOCBQUEUED;
			if (retval > 0)
				*ppos = pos + retval;
		}
		file_accessed(filp);
		goto out;
	}

	retval = 0;
	if (count) {
		for (seg = 0; seg < nr_segs; seg++) {
			read_descriptor_t desc;

			desc.written = 0;
			desc.arg.buf = iov[seg].iov_base;
			desc.count = iov[seg].iov_len;
			if (desc.count == 0)
				continue;
			desc.error = 0;
			do_generic_file_read(filp,ppos,&desc,file_read_actor);
			retval += desc.written;
			if (!retval) {
				retval = desc.error;
				break;
			}
		}
	}
out:
	return retval;
}

EXPORT_SYMBOL(__generic_file_aio_read);

ssize_t
generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
	struct iovec local_iov = { .iov_base = buf, .iov_len = count };

	BUG_ON(iocb->ki_pos != pos);
	return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
}

EXPORT_SYMBOL(generic_file_aio_read);

ssize_t
generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	ret = __generic_file_aio_read(&kiocb, &local_iov, 1, ppos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}

EXPORT_SYMBOL(generic_file_read);
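/*
 * Illustrative sketch (an addition, not in the original file): a
 * pagecache-backed filesystem typically just points its
 * file_operations at these helpers, e.g.
 *
 *	struct file_operations foo_file_ops = {	-- 'foo' is hypothetical
 *		.read		= generic_file_read,
 *		.aio_read	= generic_file_aio_read,
 *		.mmap		= generic_file_mmap,
 *	};
 */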

int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
{
	ssize_t written;
	unsigned long count = desc->count;
	struct file *file = desc->arg.data;

	if (size > count)
		size = count;

	written = file->f_op->sendpage(file, page, offset,
					size, &file->f_pos, size<count);
	if (written < 0) {
		desc->error = written;
		written = 0;
	}
	desc->count = count - written;
	desc->written += written;
	return written;
}

ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos,
			 size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_generic_file_read(in_file, ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

EXPORT_SYMBOL(generic_file_sendfile);

static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
	     unsigned long index, unsigned long nr)
{
	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
		return -EINVAL;

	force_page_cache_readahead(mapping, filp, index,
					max_sane_readahead(nr));
	return 0;
}

asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct file *file;

	ret = -EBADF;
	file = fget(fd);
	if (file) {
		if (file->f_mode & FMODE_READ) {
			struct address_space *mapping = file->f_mapping;
			unsigned long start = offset >> PAGE_CACHE_SHIFT;
			unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
			unsigned long len = end - start + 1;
			ret = do_readahead(mapping, file, start, len);
		}
		fput(file);
	}
	return ret;
}

#ifdef CONFIG_MMU
/*
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
static int fastcall page_cache_read(struct file * file, unsigned long offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int error;

	page = page_cache_alloc_cold(mapping);
	if (!page)
		return -ENOMEM;

	error = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
	if (!error) {
		error = mapping->a_ops->readpage(file, page);
		page_cache_release(page);
		return error;
	}

	/*
	 * We arrive here in the unlikely event that someone
	 * raced with us and added our page to the cache first
	 * or we are out of memory for radix-tree nodes.
	 */
	page_cache_release(page);
	return error == -EEXIST ? 0 : error;
}

#define MMAP_LOTSAMISS  (100)

/*
 * filemap_nopage() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 */
struct page *filemap_nopage(struct vm_area_struct *area,
				unsigned long address, int *type)
{
	int error;
	struct file *file = area->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long size, pgoff;
	int did_readaround = 0, majmin = VM_FAULT_MINOR;

	pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;

retry_all:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (pgoff >= size)
		goto outside_data_content;

	/* If we don't want any read-ahead, don't bother */
	if (VM_RandomReadHint(area))
		goto no_cached_page;

	/*
	 * The readahead code wants to be told about each and every page
	 * so it can build and shrink its windows appropriately
	 *
	 * For sequential accesses, we use the generic readahead logic.
	 */
	if (VM_SequentialReadHint(area))
		page_cache_readahead(mapping, ra, file, pgoff, 1);

	/*
	 * Do we have something in the page cache already?
	 */
retry_find:
	page = find_get_page(mapping, pgoff);
	if (!page) {
		unsigned long ra_pages;

		if (VM_SequentialReadHint(area)) {
			handle_ra_miss(mapping, ra, pgoff);
			goto no_cached_page;
		}
		ra->mmap_miss++;

		/*
		 * Do we miss much more than hit in this file? If so,
		 * stop bothering with read-ahead. It will only hurt.
		 */
		if (ra->mmap_miss > ra->mmap_hit + MMAP_LOTSAMISS)
			goto no_cached_page;

		/*
		 * To keep the pgmajfault counter straight, we need to
		 * check did_readaround, as this is an inner loop.
		 */
		if (!did_readaround) {
			majmin = VM_FAULT_MAJOR;
			inc_page_state(pgmajfault);
		}
		did_readaround = 1;
		ra_pages = max_sane_readahead(file->f_ra.ra_pages);
		if (ra_pages) {
			pgoff_t start = 0;

			if (pgoff > ra_pages / 2)
				start = pgoff - ra_pages / 2;
			do_page_cache_readahead(mapping, file, start, ra_pages);
		}
		page = find_get_page(mapping, pgoff);
		if (!page)
			goto no_cached_page;
	}

	if (!did_readaround)
		ra->mmap_hit++;

	/*
	 * Ok, found a page in the page cache, now we need to check
	 * that it's up-to-date.
	 */
	if (!PageUptodate(page))
		goto page_not_uptodate;

success:
	/*
	 * Found the page and have a reference on it.
	 */
	mark_page_accessed(page);
	if (type)
		*type = majmin;
	return page;

outside_data_content:
	/*
	 * An external ptracer can access pages that normally aren't
	 * accessible..
	 */
	if (area->vm_mm == current->mm)
		return NULL;
	/* Fall through to the non-read-ahead case */
no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, pgoff);
	grab_swap_token();

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return NOPAGE_OOM;
	return NULL;

page_not_uptodate:
	if (!did_readaround) {
		majmin = VM_FAULT_MAJOR;
		inc_page_state(pgmajfault);
	}
	lock_page(page);

	/* Did it get unhashed while we waited for it? */
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry_all;
	}

	/* Did somebody else get it up-to-date? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}

	if (!mapping->a_ops->readpage(file, page)) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	}

	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	lock_page(page);

	/* Somebody truncated the page on us? */
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry_all;
	}

	/* Somebody else successfully read it in? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}
	ClearPageError(page);
	if (!mapping->a_ops->readpage(file, page)) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	}

	/*
	 * Things didn't work out. Return zero to tell the
	 * mm layer so, possibly freeing the page cache page first.
	 */
	page_cache_release(page);
	return NULL;
}

EXPORT_SYMBOL(filemap_nopage);

static struct page * filemap_getpage(struct file *file, unsigned long pgoff,
					int nonblock)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int error;

	/*
	 * Do we have something in the page cache already?
	 */
retry_find:
	page = find_get_page(mapping, pgoff);
	if (!page) {
		if (nonblock)
			return NULL;
		goto no_cached_page;
	}

	/*
	 * Ok, found a page in the page cache, now we need to check
	 * that it's up-to-date.
	 */
	if (!PageUptodate(page)) {
		if (nonblock) {
			page_cache_release(page);
			return NULL;
		}
		goto page_not_uptodate;
	}

success:
	/*
	 * Found the page and have a reference on it.
	 */
	mark_page_accessed(page);
	return page;

no_cached_page:
	error = page_cache_read(file, pgoff);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	return NULL;

page_not_uptodate:
	lock_page(page);

	/* Did it get unhashed while we waited for it? */
	if (!page->mapping) {
		unlock_page(page);
		goto err;
	}

	/* Did somebody else get it up-to-date? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}

	if (!mapping->a_ops->readpage(file, page)) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	}

	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	lock_page(page);

	/* Somebody truncated the page on us? */
	if (!page->mapping) {
		unlock_page(page);
		goto err;
	}
	/* Somebody else successfully read it in? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}

	ClearPageError(page);
	if (!mapping->a_ops->readpage(file, page)) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	}

	/*
	 * Things didn't work out. Return zero to tell the
	 * mm layer so, possibly freeing the page cache page first.
	 */
err:
	page_cache_release(page);

	return NULL;
}

int filemap_populate(struct vm_area_struct *vma, unsigned long addr,
		unsigned long len, pgprot_t prot, unsigned long pgoff,
		int nonblock)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long size;
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int err;

	if (!nonblock)
		force_page_cache_readahead(mapping, vma->vm_file,
					pgoff, len >> PAGE_CACHE_SHIFT);

repeat:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (pgoff + (len >> PAGE_CACHE_SHIFT) > size)
		return -EINVAL;

	page = filemap_getpage(file, pgoff, nonblock);
	if (!page && !nonblock)
		return -ENOMEM;
	if (page) {
		err = install_page(mm, vma, addr, page, prot);
		if (err) {
			page_cache_release(page);
			return err;
		}
	} else {
		err = install_file_pte(mm, vma, addr, pgoff, prot);
		if (err)
			return err;
	}

	len -= PAGE_SIZE;
	addr += PAGE_SIZE;
	pgoff++;
	if (len)
		goto repeat;

	return 0;
}

struct vm_operations_struct generic_file_vm_ops = {
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}
EXPORT_SYMBOL(filemap_populate);

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);
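/*
 * Illustrative sketch (an addition, not in the original file): the
 * fault path driven by the ops above, in rough order:
 *
 *	fault at addr -> filemap_nopage(vma, addr, &type)
 *	  -> find_get_page() hit: return the referenced page
 *	  -> miss: page_cache_read() starts ->readpage(), then the
 *	     lookup is retried
 */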

static inline struct page *__read_cache_page(struct address_space *mapping,
				unsigned long index,
				int (*filler)(void *,struct page*),
				void *data)
{
	struct page *page, *cached_page = NULL;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		if (!cached_page) {
			cached_page = page_cache_alloc_cold(mapping);
			if (!cached_page)
				return ERR_PTR(-ENOMEM);
		}
		err = add_to_page_cache_lru(cached_page, mapping,
					index, GFP_KERNEL);
		if (err == -EEXIST)
			goto repeat;
		if (err < 0) {
			/* Presumably ENOMEM for radix tree node */
			page_cache_release(cached_page);
			return ERR_PTR(err);
		}
		page = cached_page;
		cached_page = NULL;
		err = filler(data, page);
		if (err < 0) {
			page_cache_release(page);
			page = ERR_PTR(err);
		}
	}
	if (cached_page)
		page_cache_release(cached_page);
	return page;
}

/*
 * Read into the page cache. If a page already exists,
 * and PageUptodate() is not set, try to fill the page.
 */
struct page *read_cache_page(struct address_space *mapping,
				unsigned long index,
				int (*filler)(void *,struct page*),
				void *data)
{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data);
	if (IS_ERR(page))
		goto out;
	mark_page_accessed(page);
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		page = ERR_PTR(err);
	}
out:
	return page;
}

EXPORT_SYMBOL(read_cache_page);
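/*
 * Illustrative sketch (an addition, not in the original file): callers
 * commonly use the mapping's own ->readpage as the filler, e.g.
 *
 *	page = read_cache_page(mapping, n,
 *			(filler_t *)mapping->a_ops->readpage, file);
 *	if (!IS_ERR(page)) {
 *		wait_on_page_locked(page);
 *		if (!PageUptodate(page))
 *			... treat as -EIO ...
 *	}
 */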

/*
 * If the page was newly created, increment its refcount and add it to the
 * caller's lru-buffering pagevec. This function is specifically for
 * generic_file_write().
 */
static inline struct page *
__grab_cache_page(struct address_space *mapping, unsigned long index,
			struct page **cached_page, struct pagevec *lru_pvec)
{
	int err;
	struct page *page;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		if (!*cached_page) {
			*cached_page = page_cache_alloc(mapping);
			if (!*cached_page)
				return NULL;
		}
		err = add_to_page_cache(*cached_page, mapping,
					index, GFP_KERNEL);
		if (err == -EEXIST)
			goto repeat;
		if (err == 0) {
			page = *cached_page;
			page_cache_get(page);
			if (!pagevec_add(lru_pvec, page))
				__pagevec_lru_add(lru_pvec);
			*cached_page = NULL;
		}
	}
	return page;
}

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int remove_suid(struct dentry *dentry)
{
	mode_t mode = dentry->d_inode->i_mode;
	int kill = 0;
	int result = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone. If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID))) {
		struct iattr newattrs;

		newattrs.ia_valid = ATTR_FORCE | kill;
		result = notify_change(dentry, &newattrs);
	}
	return result;
}
EXPORT_SYMBOL(remove_suid);
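/*
 * Illustrative sketch (an addition, not in the original file): write
 * paths call this before transferring data, so a write by an
 * unprivileged user drops setuid/setgid bits, roughly:
 *
 *	err = remove_suid(file->f_dentry);
 *	if (err)
 *		goto out;
 *	... copy data, dirty pages ...
 */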
1716 | |
1717 | /* |
1718 | * Copy as much as we can into the page and return the number of bytes which |
1719 | * were sucessfully copied. If a fault is encountered then clear the page |
1720 | * out to (offset+bytes) and return the number of bytes which were copied. |
1721 | */ |
1722 | static inline size_t |
1723 | filemap_copy_from_user(struct page *page, unsigned long offset, |
1724 | const char __user *buf, unsigned bytes) |
1725 | { |
1726 | char *kaddr; |
1727 | int left; |
1728 | |
1729 | kaddr = kmap_atomic(page, KM_USER0); |
1730 | left = __copy_from_user_inatomic(kaddr + offset, buf, bytes); |
1731 | kunmap_atomic(kaddr, KM_USER0); |
1732 | |
1733 | if (left != 0) { |
1734 | /* Do it the slow way */ |
1735 | kaddr = kmap(page); |
1736 | left = __copy_from_user(kaddr + offset, buf, bytes); |
1737 | kunmap(page); |
1738 | } |
1739 | return bytes - left; |
1740 | } |
1741 | |
1742 | static size_t |
1743 | __filemap_copy_from_user_iovec(char *vaddr, |
1744 | const struct iovec *iov, size_t base, size_t bytes) |
1745 | { |
1746 | size_t copied = 0, left = 0; |
1747 | |
1748 | while (bytes) { |
1749 | char __user *buf = iov->iov_base + base; |
1750 | int copy = min(bytes, iov->iov_len - base); |
1751 | |
1752 | base = 0; |
1753 | left = __copy_from_user_inatomic(vaddr, buf, copy); |
1754 | copied += copy; |
1755 | bytes -= copy; |
1756 | vaddr += copy; |
1757 | iov++; |
1758 | |
1759 | if (unlikely(left)) { |
1760 | /* zero the rest of the target like __copy_from_user */ |
1761 | if (bytes) |
1762 | memset(vaddr, 0, bytes); |
1763 | break; |
1764 | } |
1765 | } |
1766 | return copied - left; |
1767 | } |
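A userspace analogue of the gather loop above, with memcpy standing in for
__copy_from_user_inatomic (so the fault/zero-fill branch never fires):

	#include <stdio.h>
	#include <string.h>
	#include <sys/uio.h>

	static size_t copy_from_iovec(char *dst, const struct iovec *iov,
				      size_t base, size_t bytes)
	{
		size_t copied = 0;

		while (bytes) {
			size_t chunk = iov->iov_len - base;

			if (chunk > bytes)
				chunk = bytes;
			memcpy(dst + copied, (char *)iov->iov_base + base, chunk);
			copied += chunk;
			bytes -= chunk;
			base = 0;
			iov++;
		}
		return copied;
	}

	int main(void)
	{
		char a[] = "hello ", b[] = "world";
		struct iovec iov[2] = {
			{ .iov_base = a, .iov_len = 6 },
			{ .iov_base = b, .iov_len = 5 },
		};
		char out[16] = "";

		copy_from_iovec(out, iov, 0, 11);
		printf("%s\n", out);		/* hello world */
		return 0;
	}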
1768 | |
1769 | /* |
1770 | * This has the same side effects and return value as filemap_copy_from_user(). |
1771 | * The difference is that on a fault we need to memset the remainder of the |
1772 | * page (out to offset+bytes), to emulate filemap_copy_from_user()'s |
1773 | * single-segment behaviour. |
1774 | */ |
1775 | static inline size_t |
1776 | filemap_copy_from_user_iovec(struct page *page, unsigned long offset, |
1777 | const struct iovec *iov, size_t base, size_t bytes) |
1778 | { |
1779 | char *kaddr; |
1780 | size_t copied; |
1781 | |
1782 | kaddr = kmap_atomic(page, KM_USER0); |
1783 | copied = __filemap_copy_from_user_iovec(kaddr + offset, iov, |
1784 | base, bytes); |
1785 | kunmap_atomic(kaddr, KM_USER0); |
1786 | if (copied != bytes) { |
1787 | kaddr = kmap(page); |
1788 | copied = __filemap_copy_from_user_iovec(kaddr + offset, iov, |
1789 | base, bytes); |
1790 | kunmap(page); |
1791 | } |
1792 | return copied; |
1793 | } |
1794 | |
1795 | static inline void |
1796 | filemap_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes) |
1797 | { |
1798 | const struct iovec *iov = *iovp; |
1799 | size_t base = *basep; |
1800 | |
1801 | while (bytes) { |
1802 | int copy = min(bytes, iov->iov_len - base); |
1803 | |
1804 | bytes -= copy; |
1805 | base += copy; |
1806 | if (iov->iov_len == base) { |
1807 | iov++; |
1808 | base = 0; |
1809 | } |
1810 | } |
1811 | *iovp = iov; |
1812 | *basep = base; |
1813 | } |
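The cursor-advance logic above is easy to sanity-check in userspace; the
assert below shows that consuming 8 bytes across a 6-byte first segment
leaves the cursor 2 bytes into the second one:

	#include <assert.h>
	#include <stddef.h>
	#include <sys/uio.h>

	static void advance_iovec(const struct iovec **iovp, size_t *basep,
				  size_t bytes)
	{
		const struct iovec *iov = *iovp;
		size_t base = *basep;

		while (bytes) {
			size_t copy = iov->iov_len - base;

			if (copy > bytes)
				copy = bytes;
			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {
				iov++;
				base = 0;
			}
		}
		*iovp = iov;
		*basep = base;
	}

	int main(void)
	{
		char buf[16];
		struct iovec iov[2] = {
			{ .iov_base = buf,     .iov_len = 6 },
			{ .iov_base = buf + 6, .iov_len = 10 },
		};
		const struct iovec *cur = iov;
		size_t base = 0;

		advance_iovec(&cur, &base, 8);
		assert(cur == &iov[1] && base == 2);
		return 0;
	}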
1814 | |
1815 | /* |
1816 | * Performs necessary checks before doing a write |
1817 | * |
1818 | * Can adjust the write position or the number of bytes to write. |
1819 | * Returns an appropriate error code the caller should return, or |
1820 | * zero if the write should be allowed. |
1821 | */ |
1822 | inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk) |
1823 | { |
1824 | struct inode *inode = file->f_mapping->host; |
1825 | unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; |
1826 | |
1827 | if (unlikely(*pos < 0)) |
1828 | return -EINVAL; |
1829 | |
1830 | if (unlikely(file->f_error)) { |
1831 | int err = file->f_error; |
1832 | file->f_error = 0; |
1833 | return err; |
1834 | } |
1835 | |
1836 | if (!isblk) { |
1837 | /* FIXME: this is for backwards compatibility with 2.4 */ |
1838 | if (file->f_flags & O_APPEND) |
1839 | *pos = i_size_read(inode); |
1840 | |
1841 | if (limit != RLIM_INFINITY) { |
1842 | if (*pos >= limit) { |
1843 | send_sig(SIGXFSZ, current, 0); |
1844 | return -EFBIG; |
1845 | } |
1846 | if (*count > limit - (typeof(limit))*pos) { |
1847 | *count = limit - (typeof(limit))*pos; |
1848 | } |
1849 | } |
1850 | } |
1851 | |
1852 | /* |
1853 | * LFS rule |
1854 | */ |
1855 | if (unlikely(*pos + *count > MAX_NON_LFS && |
1856 | !(file->f_flags & O_LARGEFILE))) { |
1857 | if (*pos >= MAX_NON_LFS) { |
1858 | send_sig(SIGXFSZ, current, 0); |
1859 | return -EFBIG; |
1860 | } |
1861 | if (*count > MAX_NON_LFS - (unsigned long)*pos) { |
1862 | *count = MAX_NON_LFS - (unsigned long)*pos; |
1863 | } |
1864 | } |
1865 | |
1866 | /* |
1867 | * Are we about to exceed the fs block limit? |
1868 | * |
1869 | * If we have already written data, it becomes a short write. If we |
1870 | * have exceeded the limit without writing data, we send a signal and |
1871 | * return EFBIG. Linus's frestrict idea will clean these up nicely. |
1872 | */ |
1873 | if (likely(!isblk)) { |
1874 | if (unlikely(*pos >= inode->i_sb->s_maxbytes)) { |
1875 | if (*count || *pos > inode->i_sb->s_maxbytes) { |
1876 | send_sig(SIGXFSZ, current, 0); |
1877 | return -EFBIG; |
1878 | } |
1879 | /* zero-length writes at ->s_maxbytes are OK */ |
1880 | } |
1881 | |
1882 | if (unlikely(*pos + *count > inode->i_sb->s_maxbytes)) |
1883 | *count = inode->i_sb->s_maxbytes - *pos; |
1884 | } else { |
1885 | loff_t isize; |
1886 | if (bdev_read_only(I_BDEV(inode))) |
1887 | return -EPERM; |
1888 | isize = i_size_read(inode); |
1889 | if (*pos >= isize) { |
1890 | if (*count || *pos > isize) |
1891 | return -ENOSPC; |
1892 | } |
1893 | |
1894 | if (*pos + *count > isize) |
1895 | *count = isize - *pos; |
1896 | } |
1897 | return 0; |
1898 | } |
1899 | EXPORT_SYMBOL(generic_write_checks); |
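The RLIMIT_FSIZE branch above is observable from userspace. A small probe,
assuming an unprivileged process and a scratch filename of my own choosing:
the first write straddles the limit and is shortened; the second starts at
the limit and fails with EFBIG after SIGXFSZ (ignored here so the process
survives):

	#include <errno.h>
	#include <fcntl.h>
	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/resource.h>

	int main(void)
	{
		struct rlimit rl = { .rlim_cur = 4, .rlim_max = 4 };
		char buf[8] = "1234567";
		ssize_t n;
		int fd;

		signal(SIGXFSZ, SIG_IGN);	/* the default action is fatal */
		setrlimit(RLIMIT_FSIZE, &rl);
		fd = open("limit_demo", O_CREAT | O_WRONLY | O_TRUNC, 0644);
		n = write(fd, buf, sizeof(buf));
		printf("first write: %zd\n", n);	/* shortened to 4 */
		n = write(fd, buf, 1);
		printf("second write: %zd (%s)\n", n,
		       n < 0 ? strerror(errno) : "ok");	/* -1 (EFBIG) */
		close(fd);
		unlink("limit_demo");
		return 0;
	}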
1900 | |
1901 | ssize_t |
1902 | generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov, |
1903 | unsigned long *nr_segs, loff_t pos, loff_t *ppos, |
1904 | size_t count, size_t ocount) |
1905 | { |
1906 | struct file *file = iocb->ki_filp; |
1907 | struct address_space *mapping = file->f_mapping; |
1908 | struct inode *inode = mapping->host; |
1909 | ssize_t written; |
1910 | |
1911 | if (count != ocount) |
1912 | *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count); |
1913 | |
1914 | written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs); |
1915 | if (written > 0) { |
1916 | loff_t end = pos + written; |
1917 | if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { |
1918 | i_size_write(inode, end); |
1919 | mark_inode_dirty(inode); |
1920 | } |
1921 | *ppos = end; |
1922 | } |
1923 | |
1924 | /* |
1925 | * Sync the fs metadata but not the minor inode changes, and of |
1926 | * course not the data, as we did direct DMA for the IO. |
1927 | * i_sem is held, which protects generic_osync_inode() from |
1928 | * livelocking. |
1929 | */ |
1930 | if (written >= 0 && file->f_flags & O_SYNC) |
1931 | generic_osync_inode(inode, mapping, OSYNC_METADATA); |
1932 | if (written == count && !is_sync_kiocb(iocb)) |
1933 | written = -EIOCBQUEUED; |
1934 | return written; |
1935 | } |
1936 | EXPORT_SYMBOL(generic_file_direct_write); |
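For reference, the kind of userspace I/O that reaches this path: an O_DIRECT
write with aligned buffer, length, and file offset. Alignment rules are
device- and filesystem-dependent (4096 is a common safe choice), and some
filesystems refuse O_DIRECT entirely, so treat this as a sketch; the
filename is mine:

	#define _GNU_SOURCE		/* for O_DIRECT */
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		void *buf;
		int fd = open("dio_demo", O_CREAT | O_WRONLY | O_DIRECT, 0644);

		if (fd < 0 || posix_memalign(&buf, 4096, 4096))
			return 1;
		memset(buf, 'x', 4096);
		if (write(fd, buf, 4096) != 4096)
			perror("write");
		free(buf);
		close(fd);
		unlink("dio_demo");
		return 0;
	}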
1937 | |
1938 | ssize_t |
1939 | generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, |
1940 | unsigned long nr_segs, loff_t pos, loff_t *ppos, |
1941 | size_t count, ssize_t written) |
1942 | { |
1943 | struct file *file = iocb->ki_filp; |
1944 | struct address_space * mapping = file->f_mapping; |
1945 | struct address_space_operations *a_ops = mapping->a_ops; |
1946 | struct inode *inode = mapping->host; |
1947 | long status = 0; |
1948 | struct page *page; |
1949 | struct page *cached_page = NULL; |
1950 | size_t bytes; |
1951 | struct pagevec lru_pvec; |
1952 | const struct iovec *cur_iov = iov; /* current iovec */ |
1953 | size_t iov_base = 0; /* offset in the current iovec */ |
1954 | char __user *buf; |
1955 | |
1956 | pagevec_init(&lru_pvec, 0); |
1957 | |
1958 | /* |
1959 | * Handle a partial DIO write: adjust cur_iov if needed. |
1960 | */ |
1961 | if (likely(nr_segs == 1)) |
1962 | buf = iov->iov_base + written; |
1963 | else { |
1964 | filemap_set_next_iovec(&cur_iov, &iov_base, written); |
1965 | buf = cur_iov->iov_base + iov_base; |
1966 | } |
1967 | |
1968 | do { |
1969 | unsigned long index; |
1970 | unsigned long offset; |
1971 | unsigned long maxlen; |
1972 | size_t copied; |
1973 | |
1974 | offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */ |
1975 | index = pos >> PAGE_CACHE_SHIFT; |
1976 | bytes = PAGE_CACHE_SIZE - offset; |
1977 | if (bytes > count) |
1978 | bytes = count; |
1979 | |
1980 | /* |
1981 | * Bring in the user page that we will copy from _first_. |
1982 | * Otherwise there's a nasty deadlock on copying from the |
1983 | * same page as we're writing to, without it being marked |
1984 | * up-to-date. |
1985 | */ |
1986 | maxlen = cur_iov->iov_len - iov_base; |
1987 | if (maxlen > bytes) |
1988 | maxlen = bytes; |
1989 | fault_in_pages_readable(buf, maxlen); |
1990 | |
1991 | page = __grab_cache_page(mapping, index, &cached_page, &lru_pvec); |
1992 | if (!page) { |
1993 | status = -ENOMEM; |
1994 | break; |
1995 | } |
1996 | |
1997 | status = a_ops->prepare_write(file, page, offset, offset+bytes); |
1998 | if (unlikely(status)) { |
1999 | loff_t isize = i_size_read(inode); |
2000 | /* |
2001 | * prepare_write() may have instantiated a few blocks |
2002 | * outside i_size. Trim these off again. |
2003 | */ |
2004 | unlock_page(page); |
2005 | page_cache_release(page); |
2006 | if (pos + bytes > isize) |
2007 | vmtruncate(inode, isize); |
2008 | break; |
2009 | } |
2010 | if (likely(nr_segs == 1)) |
2011 | copied = filemap_copy_from_user(page, offset, |
2012 | buf, bytes); |
2013 | else |
2014 | copied = filemap_copy_from_user_iovec(page, offset, |
2015 | cur_iov, iov_base, bytes); |
2016 | flush_dcache_page(page); |
2017 | status = a_ops->commit_write(file, page, offset, offset+bytes); |
2018 | if (likely(copied > 0)) { |
2019 | if (!status) |
2020 | status = copied; |
2021 | |
2022 | if (status >= 0) { |
2023 | written += status; |
2024 | count -= status; |
2025 | pos += status; |
2026 | buf += status; |
2027 | if (unlikely(nr_segs > 1)) { |
2028 | filemap_set_next_iovec(&cur_iov, |
2029 | &iov_base, status); |
2030 | buf = cur_iov->iov_base + iov_base; |
2031 | } else { |
2032 | iov_base += status; |
2033 | } |
2034 | } |
2035 | } |
2036 | if (unlikely(copied != bytes)) |
2037 | if (status >= 0) |
2038 | status = -EFAULT; |
2039 | unlock_page(page); |
2040 | mark_page_accessed(page); |
2041 | page_cache_release(page); |
2042 | if (status < 0) |
2043 | break; |
2044 | balance_dirty_pages_ratelimited(mapping); |
2045 | cond_resched(); |
2046 | } while (count); |
2047 | *ppos = pos; |
2048 | |
2049 | if (cached_page) |
2050 | page_cache_release(cached_page); |
2051 | |
2052 | /* |
2053 | * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC |
2054 | */ |
2055 | if (likely(status >= 0)) { |
2056 | if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) { |
2057 | if (!a_ops->writepage || !is_sync_kiocb(iocb)) |
2058 | status = generic_osync_inode(inode, mapping, |
2059 | OSYNC_METADATA|OSYNC_DATA); |
2060 | } |
2061 | } |
2062 | |
2063 | /* |
2064 | * If we get here for O_DIRECT writes then we must have fallen through |
2065 | * to buffered writes (block instantiation inside i_size). So we sync |
2066 | * the file data here, to try to honour O_DIRECT expectations. |
2067 | */ |
2068 | if (unlikely(file->f_flags & O_DIRECT) && written) |
2069 | status = filemap_write_and_wait(mapping); |
2070 | |
2071 | pagevec_lru_add(&lru_pvec); |
2072 | return written ? written : status; |
2073 | } |
2074 | EXPORT_SYMBOL(generic_file_buffered_write); |
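The "nasty deadlock" comment in the loop above concerns writes whose source
buffer is an mmap of the very page being written; fault_in_pages_readable()
pre-faults the source so the later copy, performed with the destination page
locked, need not fault. The overlapping case itself is legal and easy to
construct (scratch filename is mine):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/mman.h>

	int main(void)
	{
		char page[4096];
		char *map;
		int fd = open("overlap_demo", O_CREAT | O_RDWR | O_TRUNC, 0644);

		if (fd < 0)
			return 1;
		memset(page, 'a', sizeof(page));
		write(fd, page, sizeof(page));
		map = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
		if (map == MAP_FAILED)
			return 1;
		lseek(fd, 0, SEEK_SET);
		/* source buffer and destination are the same pagecache page */
		printf("wrote %zd\n", write(fd, map, 4096));
		munmap(map, 4096);
		close(fd);
		unlink("overlap_demo");
		return 0;
	}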
2075 | |
2076 | ssize_t |
2077 | __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, |
2078 | unsigned long nr_segs, loff_t *ppos) |
2079 | { |
2080 | struct file *file = iocb->ki_filp; |
2081 | struct address_space * mapping = file->f_mapping; |
2082 | size_t ocount; /* original count */ |
2083 | size_t count; /* after file limit checks */ |
2084 | struct inode *inode = mapping->host; |
2085 | unsigned long seg; |
2086 | loff_t pos; |
2087 | ssize_t written; |
2088 | ssize_t err; |
2089 | |
2090 | ocount = 0; |
2091 | for (seg = 0; seg < nr_segs; seg++) { |
2092 | const struct iovec *iv = &iov[seg]; |
2093 | |
2094 | /* |
2095 | * If any segment has a negative length, or the cumulative |
2096 | * length ever wraps negative, then return -EINVAL. |
2097 | */ |
2098 | ocount += iv->iov_len; |
2099 | if (unlikely((ssize_t)(ocount|iv->iov_len) < 0)) |
2100 | return -EINVAL; |
2101 | if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len)) |
2102 | continue; |
2103 | if (seg == 0) |
2104 | return -EFAULT; |
2105 | nr_segs = seg; |
2106 | ocount -= iv->iov_len; /* This segment is no good */ |
2107 | break; |
2108 | } |
2109 | |
2110 | count = ocount; |
2111 | pos = *ppos; |
2112 | |
2113 | vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); |
2114 | |
2115 | /* We can write back this queue in page reclaim */ |
2116 | current->backing_dev_info = mapping->backing_dev_info; |
2117 | written = 0; |
2118 | |
2119 | err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); |
2120 | if (err) |
2121 | goto out; |
2122 | |
2123 | if (count == 0) |
2124 | goto out; |
2125 | |
2126 | err = remove_suid(file->f_dentry); |
2127 | if (err) |
2128 | goto out; |
2129 | |
2130 | inode_update_time(inode, 1); |
2131 | |
2132 | /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ |
2133 | if (unlikely(file->f_flags & O_DIRECT)) { |
2134 | written = generic_file_direct_write(iocb, iov, |
2135 | &nr_segs, pos, ppos, count, ocount); |
2136 | if (written < 0 || written == count) |
2137 | goto out; |
2138 | /* |
2139 | * a direct-io write to a hole: fall through to buffered I/O |
2140 | * to complete the rest of the request. |
2141 | */ |
2142 | pos += written; |
2143 | count -= written; |
2144 | } |
2145 | |
2146 | written = generic_file_buffered_write(iocb, iov, nr_segs, |
2147 | pos, ppos, count, written); |
2148 | out: |
2149 | current->backing_dev_info = NULL; |
2150 | return written ? written : err; |
2151 | } |
2152 | EXPORT_SYMBOL(generic_file_aio_write_nolock); |
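(Note: the EXPORT_SYMBOL above names the non-underscore variant defined just
below rather than the function it follows; it presumably links because the
symbol is declared in a shared header. Later kernels rearranged this code.)

The per-segment validation at the top of __generic_file_aio_write_nolock()
can be probed with writev(). On this 2.6.12 code a bad later segment merely
truncates nr_segs, shortening the call, while a bad first segment fails with
EFAULT; newer kernels validate all segments up front and may fail the whole
call either way, so the printed results are version-dependent. Filename and
outputs are mine:

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/uio.h>

	int main(void)
	{
		char ok[2] = "ok";
		int fd = open("iov_demo", O_CREAT | O_WRONLY | O_TRUNC, 0644);
		struct iovec iov[2] = {
			{ .iov_base = ok,         .iov_len = 2 },
			{ .iov_base = (void *)-1, .iov_len = 2 },  /* bad */
		};
		ssize_t n;

		n = writev(fd, iov, 2);
		printf("bad 2nd segment: %zd (%s)\n", n,
		       n < 0 ? strerror(errno) : "short write");
		iov[0].iov_base = (void *)-1;			   /* bad */
		n = writev(fd, iov, 2);
		printf("bad 1st segment: %zd (%s)\n", n,
		       n < 0 ? strerror(errno) : "unexpected");
		close(fd);
		unlink("iov_demo");
		return 0;
	}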
2153 | |
2154 | ssize_t |
2155 | generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, |
2156 | unsigned long nr_segs, loff_t *ppos) |
2157 | { |
2158 | struct file *file = iocb->ki_filp; |
2159 | struct address_space *mapping = file->f_mapping; |
2160 | struct inode *inode = mapping->host; |
2161 | ssize_t ret; |
2162 | loff_t pos = *ppos; |
2163 | |
2164 | ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs, ppos); |
2165 | |
2166 | if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { |
2167 | int err; |
2168 | |
2169 | err = sync_page_range_nolock(inode, mapping, pos, ret); |
2170 | if (err < 0) |
2171 | ret = err; |
2172 | } |
2173 | return ret; |
2174 | } |
2175 | |
2176 | ssize_t |
2177 | __generic_file_write_nolock(struct file *file, const struct iovec *iov, |
2178 | unsigned long nr_segs, loff_t *ppos) |
2179 | { |
2180 | struct kiocb kiocb; |
2181 | ssize_t ret; |
2182 | |
2183 | init_sync_kiocb(&kiocb, file); |
2184 | ret = __generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos); |
2185 | if (ret == -EIOCBQUEUED) |
2186 | ret = wait_on_sync_kiocb(&kiocb); |
2187 | return ret; |
2188 | } |
2189 | |
2190 | ssize_t |
2191 | generic_file_write_nolock(struct file *file, const struct iovec *iov, |
2192 | unsigned long nr_segs, loff_t *ppos) |
2193 | { |
2194 | struct kiocb kiocb; |
2195 | ssize_t ret; |
2196 | |
2197 | init_sync_kiocb(&kiocb, file); |
2198 | ret = generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos); |
2199 | if (-EIOCBQUEUED == ret) |
2200 | ret = wait_on_sync_kiocb(&kiocb); |
2201 | return ret; |
2202 | } |
2203 | EXPORT_SYMBOL(generic_file_write_nolock); |
2204 | |
2205 | ssize_t generic_file_aio_write(struct kiocb *iocb, const char __user *buf, |
2206 | size_t count, loff_t pos) |
2207 | { |
2208 | struct file *file = iocb->ki_filp; |
2209 | struct address_space *mapping = file->f_mapping; |
2210 | struct inode *inode = mapping->host; |
2211 | ssize_t ret; |
2212 | struct iovec local_iov = { .iov_base = (void __user *)buf, |
2213 | .iov_len = count }; |
2214 | |
2215 | BUG_ON(iocb->ki_pos != pos); |
2216 | |
2217 | down(&inode->i_sem); |
2218 | ret = __generic_file_aio_write_nolock(iocb, &local_iov, 1, |
2219 | &iocb->ki_pos); |
2220 | up(&inode->i_sem); |
2221 | |
2222 | if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { |
2223 | ssize_t err; |
2224 | |
2225 | err = sync_page_range(inode, mapping, pos, ret); |
2226 | if (err < 0) |
2227 | ret = err; |
2228 | } |
2229 | return ret; |
2230 | } |
2231 | EXPORT_SYMBOL(generic_file_aio_write); |
2232 | |
2233 | ssize_t generic_file_write(struct file *file, const char __user *buf, |
2234 | size_t count, loff_t *ppos) |
2235 | { |
2236 | struct address_space *mapping = file->f_mapping; |
2237 | struct inode *inode = mapping->host; |
2238 | ssize_t ret; |
2239 | struct iovec local_iov = { .iov_base = (void __user *)buf, |
2240 | .iov_len = count }; |
2241 | |
2242 | down(&inode->i_sem); |
2243 | ret = __generic_file_write_nolock(file, &local_iov, 1, ppos); |
2244 | up(&inode->i_sem); |
2245 | |
2246 | if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { |
2247 | ssize_t err; |
2248 | |
2249 | err = sync_page_range(inode, mapping, *ppos - ret, ret); |
2250 | if (err < 0) |
2251 | ret = err; |
2252 | } |
2253 | return ret; |
2254 | } |
2255 | EXPORT_SYMBOL(generic_file_write); |
2256 | |
2257 | ssize_t generic_file_readv(struct file *filp, const struct iovec *iov, |
2258 | unsigned long nr_segs, loff_t *ppos) |
2259 | { |
2260 | struct kiocb kiocb; |
2261 | ssize_t ret; |
2262 | |
2263 | init_sync_kiocb(&kiocb, filp); |
2264 | ret = __generic_file_aio_read(&kiocb, iov, nr_segs, ppos); |
2265 | if (-EIOCBQUEUED == ret) |
2266 | ret = wait_on_sync_kiocb(&kiocb); |
2267 | return ret; |
2268 | } |
2269 | EXPORT_SYMBOL(generic_file_readv); |
2270 | |
2271 | ssize_t generic_file_writev(struct file *file, const struct iovec *iov, |
2272 | unsigned long nr_segs, loff_t *ppos) |
2273 | { |
2274 | struct address_space *mapping = file->f_mapping; |
2275 | struct inode *inode = mapping->host; |
2276 | ssize_t ret; |
2277 | |
2278 | down(&inode->i_sem); |
2279 | ret = __generic_file_write_nolock(file, iov, nr_segs, ppos); |
2280 | up(&inode->i_sem); |
2281 | |
2282 | if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { |
2283 | int err; |
2284 | |
2285 | err = sync_page_range(inode, mapping, *ppos - ret, ret); |
2286 | if (err < 0) |
2287 | ret = err; |
2288 | } |
2289 | return ret; |
2290 | } |
2291 | EXPORT_SYMBOL(generic_file_writev); |
2292 | |
2293 | /* |
2294 | * Called under i_sem for writes to S_ISREG files. Returns -EIO if something |
2295 | * went wrong during pagecache shootdown. |
2296 | */ |
2297 | ssize_t |
2298 | generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, |
2299 | loff_t offset, unsigned long nr_segs) |
2300 | { |
2301 | struct file *file = iocb->ki_filp; |
2302 | struct address_space *mapping = file->f_mapping; |
2303 | ssize_t retval; |
2304 | size_t write_len = 0; |
2305 | |
2306 | /* |
2307 | * If it's a write, unmap all mmappings of the file up-front. This |
2308 | * will cause any pte dirty bits to be propagated into the pageframes |
2309 | * for the subsequent filemap_write_and_wait(). |
2310 | */ |
2311 | if (rw == WRITE) { |
2312 | write_len = iov_length(iov, nr_segs); |
2313 | if (mapping_mapped(mapping)) |
2314 | unmap_mapping_range(mapping, offset, write_len, 0); |
2315 | } |
2316 | |
2317 | retval = filemap_write_and_wait(mapping); |
2318 | if (retval == 0) { |
2319 | retval = mapping->a_ops->direct_IO(rw, iocb, iov, |
2320 | offset, nr_segs); |
2321 | if (rw == WRITE && mapping->nrpages) { |
2322 | pgoff_t end = (offset + write_len - 1) |
2323 | >> PAGE_CACHE_SHIFT; |
2324 | int err = invalidate_inode_pages2_range(mapping, |
2325 | offset >> PAGE_CACHE_SHIFT, end); |
2326 | if (err) |
2327 | retval = err; |
2328 | } |
2329 | } |
2330 | return retval; |
2331 | } |
2332 | EXPORT_SYMBOL_GPL(generic_file_direct_IO); |
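The invalidate_inode_pages2_range() call above is why a buffered reader sees
data written through another descriptor with O_DIRECT: the stale cached
pages are shot down after the direct write. A sketch, with my own scratch
filename and the usual 4096-byte alignment assumption:

	#define _GNU_SOURCE		/* for O_DIRECT */
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		char back[4096];
		void *dio;
		int fd = open("inval_demo", O_CREAT | O_RDWR | O_TRUNC, 0644);
		int dfd = open("inval_demo", O_WRONLY | O_DIRECT);

		if (fd < 0 || dfd < 0 || posix_memalign(&dio, 4096, 4096))
			return 1;
		memset(back, 'o', sizeof(back));
		write(fd, back, sizeof(back));	/* populate the pagecache */
		memset(dio, 'n', 4096);
		pwrite(dfd, dio, 4096, 0);	/* direct write, same range */
		pread(fd, back, 1, 0);		/* buffered read again */
		printf("%c\n", back[0]);	/* 'n': cached page was shot down */
		free(dio);
		close(fd);
		close(dfd);
		unlink("inval_demo");
		return 0;
	}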