/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
/*
 * Types of I/O for bmap clustering and I/O completion tracking.
 */
enum {
	IO_READ,	/* mapping for a read */
	IO_DELAY,	/* mapping covers delalloc region */
	IO_UNWRITTEN,	/* mapping covers allocated but uninitialized data */
	IO_NEW		/* just allocated */
};

/*
 * Prime number of hash buckets since address is used as the key.
 */
#define NVSYNC		37
#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
static wait_queue_head_t xfs_ioend_wq[NVSYNC];
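
/*
 * Note: each ioend holds a reference on its inode via i_iocount (taken
 * in xfs_alloc_ioend() and dropped in xfs_ioend_wake()), so
 * xfs_ioend_wait() below simply sleeps until the inode's in-flight
 * ioend count reaches zero.  Hashing the inode address into one small
 * shared array of wait queues avoids embedding a wait queue in every
 * xfs_inode.
 */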

void
xfs_ioend_init(void)
{
	int i;

	for (i = 0; i < NVSYNC; i++)
		init_waitqueue_head(&xfs_ioend_wq[i]);
}

void
xfs_ioend_wait(
	xfs_inode_t	*ip)
{
	wait_queue_head_t *wq = to_ioend_wq(ip);

	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}

STATIC void
xfs_ioend_wake(
	xfs_inode_t	*ip)
{
	if (atomic_dec_and_test(&ip->i_iocount))
		wake_up(to_ioend_wq(ip));
}

STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	/*
	 * Volume managers supporting multiple paths can send back ENODEV
	 * when the final path disappears.  In this case continuing to fill
	 * the page cache with dirty data which cannot be written out is
	 * evil, so prevent that.
	 */
	if (unlikely(ioend->io_error == -ENODEV)) {
		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
				      __FILE__, __LINE__);
	}

	xfs_ioend_wake(ip);
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * If the end of the current ioend is beyond the current EOF,
 * return the new EOF value, otherwise zero.
 */
STATIC xfs_fsize_t
xfs_ioend_new_eof(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	bsize = ioend->io_offset + ioend->io_size;
	isize = MAX(ip->i_size, ip->i_new_size);
	isize = MIN(isize, bsize);
	return isize > ip->i_d.di_size ? isize : 0;
}
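
/*
 * In other words: take the larger of the current (i_size) and intended
 * (i_new_size) in-memory file sizes, clamp that to the end of the data
 * this ioend actually wrote, and report the result only when it would
 * extend the on-disk size (di_size).
 */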

/*
 * Update on-disk file size now that data has been written to disk.  The
 * current in-memory file size is i_size.  If a write is beyond eof i_new_size
 * will be the intended file size until i_size is updated.  If this write does
 * not extend all the way to the valid file size then restrict this update to
 * the end of the write.
 *
 * This function does not block as blocking on the inode lock in IO completion
 * can lead to IO completion order dependency deadlocks.  If it can't get the
 * inode ilock it will return EAGAIN.  Callers must handle this.
 */
STATIC int
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IO_READ);

	if (unlikely(ioend->io_error))
		return 0;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return EAGAIN;

	isize = xfs_ioend_new_eof(ioend);
	if (isize) {
		ip->i_d.di_size = isize;
		xfs_mark_inode_dirty(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}

/*
 * Schedule IO completion handling on a xfsdatad if this was
 * the final hold on this ioend.  If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t		*ioend,
	int			wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct workqueue_struct *wq;

		wq = (ioend->io_type == IO_UNWRITTEN) ?
			xfsconvertd_workqueue : xfsdatad_workqueue;
		queue_work(wq, &ioend->io_work);
		if (wait)
			flush_workqueue(wq);
	}
}
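
/*
 * Unwritten extent conversion has to issue transactions and may block,
 * which is presumably why it gets its own workqueue (xfsconvertd)
 * rather than sharing xfsdatad with ordinary write completions: a
 * blocked conversion then cannot stall other completion work.
 */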

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IO_UNWRITTEN &&
	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {

		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
		if (error)
			ioend->io_error = error;
	}

	/*
	 * We might have to update the on-disk file size after extending
	 * writes.
	 */
	if (ioend->io_type != IO_READ) {
		error = xfs_setfilesize(ioend);
		ASSERT(!error || error == EAGAIN);
	}

	/*
	 * If we didn't complete processing of the ioend, requeue it to the
	 * tail of the workqueue for another attempt later.  Otherwise destroy
	 * it.
	 */
	if (error == EAGAIN) {
		atomic_inc(&ioend->io_remaining);
		xfs_finish_ioend(ioend, 0);
		/* ensure we don't spin on blocked ioends */
		delay(1);
	} else
		xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}

STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	struct xfs_bmbt_irec	*imap,
	int			flags)
{
	int			nmaps = 1;
	int			new = 0;

	return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new);
}
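
/*
 * Check whether a file offset falls inside a previously looked up
 * extent mapping.  The byte offset is converted to filesystem blocks
 * so it can be compared against the [br_startoff, br_startoff +
 * br_blockcount) range of the xfs_bmbt_irec.
 */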
STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend, 0);
}

STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	/*
	 * If the I/O is beyond EOF we mark the inode dirty immediately
	 * but don't update the inode size until I/O completion.
	 */
	if (xfs_ioend_new_eof(ioend))
		xfs_mark_inode_dirty(XFS_I(ioend->io_inode));

	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
		   WRITE_SYNC_PLUG : WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}
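
/*
 * Note the allocation loop above: bio_alloc() can fail for a large
 * vector count, so we retry with progressively smaller counts; a
 * small bio should always be obtainable from the bio mempool with
 * GFP_NOIO.
 */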

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
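
/*
 * bio_add_page() returns the number of bytes it actually added, so a
 * return value other than bh->b_size means the bio is full (or the
 * device imposed a limit) and must be submitted before continuing.
 */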

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we go, then I/O completion on the first bio can occur before we
 * have marked the remaining buffers on the page async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
}
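
/*
 * Note that the xfs_finish_ioend() call at the end of pass 2 drops the
 * initial reference that xfs_alloc_ioend() set on io_remaining, so
 * completion processing can only run once every bio submitted above
 * has finished.
 */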

/*
 * Cancel submission of all buffer_heads so far in this ioend.
 * Toss the ioend too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		xfs_ioend_wake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * The new or extended ioend is returned through *result.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}
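
/*
 * Note: buffers queued on an ioend are chained through bh->b_private,
 * which xfs_submit_ioend(), xfs_cancel_ioend() and xfs_destroy_ioend()
 * later walk.  Reusing b_private this way is safe because it is not
 * otherwise used while these buffers are under writeback.
 */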

STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	lock_buffer(bh);
	xfs_map_buffer(inode, bh, imap, offset);
	bh->b_bdev = xfs_find_bdev_for_inode(inode);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
	struct page		*page,
	unsigned int		pg_offset,
	int			mapped)
{
	int			ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}

	return ret;
}

STATIC size_t
xfs_probe_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head,
	int			mapped)
{
	struct pagevec		pvec;
	pgoff_t			tindex, tlast, tloff;
	size_t			total = 0;
	int			done = 0, i;

	/* First sum forwards in this page */
	do {
		if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, pg_len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && trylock_page(page)) {
				pg_len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}

			if (!pg_len) {
				done = 1;
				break;
			}

			ASSERT(pg_len <= PAGE_CACHE_SIZE);
			total += pg_len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}
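
/*
 * The probe above stops at the first buffer that is not up to date or
 * whose mapped state differs from what the caller asked for, so a
 * cluster only ever extends across contiguous, uniformly-mapped dirty
 * pages.
 */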

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IO_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IO_DELAY);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IO_NEW);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			all_bh)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count.  On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IO_UNWRITTEN;
			else
				type = IO_DELAY;

			if (!xfs_imap_valid(inode, imap, offset)) {
				done = 1;
				continue;
			}

			ASSERT(imap->br_startblock != HOLESTARTBLOCK);
			ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

			xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			type = IO_NEW;
			if (buffer_mapped(bh) && all_bh) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		wbc->nr_to_write--;
		if (wbc->nr_to_write <= 0)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by imap and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			all_bh,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					imap, ioendp, wbc, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it.  Because they are delalloc, we can do this without needing a
 * transaction.  Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);
	ssize_t			len = 1 << inode->i_blkbits;

	if (!xfs_is_delayed_page(page, IO_DELAY))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		done;
		xfs_fileoff_t	offset_fsb;
		xfs_bmbt_irec_t	imap;
		int		nimaps = 1;
		int		error;
		xfs_fsblock_t	firstblock;
		xfs_bmap_free_t	flist;

		if (!buffer_delay(bh))
			goto next_buffer;

		offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);

		/*
		 * Map the range first and check that it is a delalloc extent
		 * before trying to unmap the range.  Otherwise we will be
		 * trying to remove a real extent (which requires a
		 * transaction) or a hole, which is probably a bad idea...
		 */
		error = xfs_bmapi(NULL, ip, offset_fsb, 1,
				XFS_BMAPI_ENTIRE,  NULL, 0, &imap,
				&nimaps, NULL, NULL);

		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
			"page discard failed delalloc mapping lookup.");
			}
			break;
		}
		if (!nimaps) {
			/* nothing there */
			goto next_buffer;
		}
		if (imap.br_startblock != DELAYSTARTBLOCK) {
			/* been converted, ignore */
			goto next_buffer;
		}
		WARN_ON(imap.br_blockcount == 0);

		/*
		 * Note: while we initialise the firstblock/flist pair, they
		 * should never be used because blocks should never be
		 * allocated or freed for a delalloc extent and hence we don't
		 * need to cancel or finish them after the xfs_bunmapi() call.
		 */
		xfs_bmap_init(&flist, &firstblock);
		error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock,
					&flist, NULL, &done);

		ASSERT(!flist.xbf_count && !flist.xbf_first);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += len;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0);
	return;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For unmapped buffer heads on the page we should allocate space if the
 * page is uptodate.
 * For any other dirty buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush the page, we
 * have to check the process flags first: if we are already in a transaction
 * or disk I/O during allocations is off, we need to fail the writepage and
 * redirty the page.
 *
 * A buffer's b_state cannot tell whether any of the blocks (or which block,
 * for that matter) are dirty due to mmap writes; therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	xfs_off_t		offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			size, len;
	int			flags, err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			all_bh = 0;

	trace_xfs_writepage(inode, page, 0);

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This is primarily to avoid stack overflows when called from deeply
	 * used stacks in random callers for direct reclaim, but disabling
	 * reclaim for kswapd is a nice side-effect as kswapd causes rather
	 * suboptimal I/O patterns, too.
	 *
	 * This should really be done by the core VM, but until that happens
	 * filesystems like XFS, btrfs and ext4 have to take care of this
	 * by themselves.
	 */
	if (current->flags & PF_MEMALLOC)
		goto out_fail;

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */
	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (current_test_flags(PF_FSTRANS) && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			unlock_page(page);
			return 0;
		}
	}

	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = BMAPI_READ;
	type = IO_NEW;

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * A hole may still be marked uptodate because discard_buffer
		 * leaves the flag set.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			ASSERT(!buffer_dirty(bh));
			imap_valid = 0;
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh))) {
			int new_ioend = 0;

			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				imap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IO_UNWRITTEN;
				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IO_DELAY;
				flags = BMAPI_ALLOCATE;

				if (wbc->sync_mode == WB_SYNC_NONE &&
				    wbc->nonblocking)
					flags |= BMAPI_TRYLOCK;
			} else {
				type = IO_NEW;
				flags = BMAPI_WRITE | BMAPI_MMAP;
			}

			if (!imap_valid) {
				/*
				 * If we didn't have a valid mapping then we
				 * need to ensure that we put the new mapping
				 * in a new ioend structure.  This needs to be
				 * done to ensure that the ioends correctly
				 * reflect the block mappings at io completion
				 * for unwritten extent conversion.
				 */
				new_ioend = 1;
				if (type == IO_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
				} else {
					size = len;
				}

				err = xfs_map_blocks(inode, offset, size,
						&imap, flags);
				if (err)
					goto error;
				imap_valid = xfs_imap_valid(inode, &imap,
							    offset);
			}
			if (imap_valid) {
				xfs_map_at_offset(inode, bh, &imap, offset);
				xfs_add_to_ioend(inode, bh, offset, type,
						 &ioend, new_ioend);
				count++;
			}
		} else if (buffer_uptodate(bh)) {
			/*
			 * We got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it.  Map the extent by reading it.
			 */
			if (!imap_valid || flags != BMAPI_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&imap, flags);
				if (err)
					goto error;
				imap_valid = xfs_imap_valid(inode, &imap,
							    offset);
			}

			/*
			 * We set the type to IO_NEW in case we are doing a
			 * small write at EOF that is extending the file but
			 * without needing an allocation.  We need to update
			 * the file size on I/O completion in this case so it
			 * is the same case as having just allocated a new
			 * extent that we are writing into for the first time.
			 */
			type = IO_NEW;
			if (trylock_buffer(bh)) {
				ASSERT(buffer_mapped(bh));
				if (imap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !imap_valid);
				count++;
			} else {
				imap_valid = 0;
			}
		} else if (PageUptodate(page)) {
			ASSERT(buffer_mapped(bh));
			imap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	if (ioend && imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
					wbc, all_bh, end_index);
	}

	if (iohead)
		xfs_submit_ioend(wbc, iohead);

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (!unmapped)
		xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released.  The page should already be clean.  We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unmapped, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0);

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	if (WARN_ON(delalloc))
		return 0;
	if (WARN_ON(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	struct xfs_bmbt_irec	imap;
	xfs_off_t		offset;
	ssize_t			size;
	int			nimap = 1;
	int			new = 0;
	int			error;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	error = xfs_iomap(XFS_I(inode), offset, size,
			  create ? flags : BMAPI_READ, &imap, &nimap, &new);
	if (error)
		return -error;
	if (nimap == 0)
		return 0;

	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !ISUNWRITTEN(&imap))
			xfs_map_buffer(inode, bh_result, &imap, offset);
		if (create && ISUNWRITTEN(&imap)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/*
	 * If this is O_DIRECT or the mpage code calling tell them how large
	 * the mapping is, so that we can avoid repeated get_blocks calls.
	 */
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t		mapping_size;

		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;

		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;

		bh_result->b_size = mapping_size;
	}

	return 0;
}
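
/*
 * The two wrappers below differ only in the "direct" argument and the
 * BMAPI_DIRECT flag: xfs_get_blocks serves buffered I/O, while
 * xfs_get_blocks_direct serves direct I/O and therefore also arms the
 * unwritten-extent completion handling keyed off bh_result->b_private
 * above.
 */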
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 0, BMAPI_WRITE);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC void
xfs_end_io_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple.  Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to userspace,
	 * we have to wait for extent conversion to complete.  Look at the
	 * iocb that has been passed to us to determine if this is AIO or
	 * not.  If it is synchronous, tell xfs_finish_ioend() to kick the
	 * workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IO_READ) {
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case it maps an unwritten extent.  This write
		 * didn't map an unwritten extent so switch its completion
		 * handler.
		 */
		ioend->io_type = IO_NEW;
		xfs_finish_ioend(ioend, 0);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}

STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	struct block_device *bdev;
	ssize_t		ret;

	bdev = xfs_find_bdev_for_inode(inode);

	iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
					IO_UNWRITTEN : IO_READ);

	ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
					    offset, nr_segs,
					    xfs_get_blocks_direct,
					    xfs_end_io_direct);

	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}

STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				 xfs_get_blocks);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_itrace_entry(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.sync_page		= block_sync_page,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= generic_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};