/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_utils.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_rw.h"
#include "xfs_quota.h"

#include <linux/kthread.h>
#include <linux/freezer.h>
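
/*
 * Flush dirty data pages for a single inode. With SYNC_TRYLOCK we back
 * off if the iolock is already held; with SYNC_WAIT we write the pages
 * synchronously, and with SYNC_IOWAIT we also wait for I/O completion.
 */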
STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space	*mapping = inode->i_mapping;
	int			error = 0;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_wait;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			goto out_wait;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
						0 : XFS_B_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
	if (flags & SYNC_IOWAIT)
		xfs_ioend_wait(ip);
	return error;
}

/*
 * Sync all the inodes in the given AG according to the
 * direction given by the flags.
 */
STATIC int
xfs_sync_inodes_ag(
	xfs_mount_t	*mp,
	int		ag,
	int		flags)
{
	xfs_perag_t	*pag = &mp->m_perag[ag];
	int		nr_found;
	uint32_t	first_index = 0;
	int		error = 0;
	int		last_error = 0;

	do {
		struct inode	*inode;
		xfs_inode_t	*ip = NULL;
		int		lock_flags = XFS_ILOCK_SHARED;

		/*
		 * use a gang lookup to find the next inode in the tree
		 * as the tree is sparse and a gang lookup walks to find
		 * the number of objects requested.
		 */
		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void**)&ip, first_index, 1);

		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/*
		 * Update the index for the next lookup. Catch overflows
		 * into the next AG range which can occur if we have inodes
		 * in the last block of the AG and we are currently
		 * pointing to the last inode.
		 */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/* nothing to sync during shutdown */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			read_unlock(&pag->pag_ici_lock);
			return 0;
		}

		/*
		 * If we can't get a reference on the inode, it must be
		 * in reclaim. Leave it for the reclaim code to flush.
		 */
		inode = VFS_I(ip);
		if (!igrab(inode)) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}
		read_unlock(&pag->pag_ici_lock);

		/* avoid new or bad inodes */
		if (is_bad_inode(inode) ||
		    xfs_iflags_test(ip, XFS_INEW)) {
			IRELE(ip);
			continue;
		}

		/*
		 * If we have to flush data or wait for I/O completion
		 * we need to hold the iolock.
		 */
		if (flags & SYNC_DELWRI)
			error = xfs_sync_inode_data(ip, flags);

		xfs_ilock(ip, XFS_ILOCK_SHARED);
		if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) {
			if (flags & SYNC_WAIT) {
				xfs_iflock(ip);
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
				else
					xfs_ifunlock(ip);
			} else if (xfs_iflock_nowait(ip)) {
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
				else
					xfs_ifunlock(ip);
			}
		}
		xfs_iput(ip, lock_flags);

		if (error)
			last_error = error;
		/*
		 * bail out if the filesystem is corrupted.
		 */
		if (error == EFSCORRUPTED)
			return XFS_ERROR(error);

	} while (nr_found);

	return last_error;
}
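
/*
 * Sync the inodes in every initialised AG, pushing the log afterwards
 * if any delwri data was flushed.
 */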
int
xfs_sync_inodes(
	xfs_mount_t	*mp,
	int		flags)
{
	int		error = 0;
	int		last_error = 0;
	int		i;
	int		lflags = XFS_LOG_FORCE;

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	if (flags & SYNC_WAIT)
		lflags |= XFS_LOG_SYNC;

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		if (!mp->m_perag[i].pag_ici_init)
			continue;
		error = xfs_sync_inodes_ag(mp, i, flags);
		if (error)
			last_error = error;
		if (error == EFSCORRUPTED)
			break;
	}
	if (flags & SYNC_DELWRI)
		xfs_log_force(mp, 0, lflags);

	return XFS_ERROR(last_error);
}

STATIC int
xfs_commit_dummy_trans(
	struct xfs_mount	*mp,
	uint			log_flags)
{
	struct xfs_inode	*ip = mp->m_rootip;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * Put a dummy transaction in the log to tell recovery
	 * that all others are OK.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/* XXX(hch): ignoring the error here.. */
	error = xfs_trans_commit(tp, 0);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_log_force(mp, 0, log_flags);
	return 0;
}
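
/*
 * Write out the superblock. For the periodic xfssyncd case this is a
 * non-blocking, best-effort write; otherwise we block on the buffer
 * lock and, with SYNC_WAIT, on the write itself.
 */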
int
xfs_sync_fsdata(
	struct xfs_mount	*mp,
	int			flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;
	int			error = 0;

	/*
	 * If this is xfssyncd() then only sync the superblock if we can
	 * lock it without sleeping and it is not pinned.
	 */
	if (flags & SYNC_BDFLUSH) {
		ASSERT(!(flags & SYNC_WAIT));

		bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
		if (!bp)
			goto out;

		bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
		if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
			goto out_brelse;
	} else {
		bp = xfs_getsb(mp, 0);

		/*
		 * If the buffer is pinned then push on the log so we won't
		 * get stuck waiting in the write for someone, maybe
		 * ourselves, to flush the log.
		 *
		 * Even though we just pushed the log above, we did not have
		 * the superblock buffer locked at that point so it can
		 * become pinned in between there and here.
		 */
		if (XFS_BUF_ISPINNED(bp))
			xfs_log_force(mp, 0, XFS_LOG_FORCE);
	}

	if (flags & SYNC_WAIT)
		XFS_BUF_UNASYNC(bp);
	else
		XFS_BUF_ASYNC(bp);

	return xfs_bwrite(mp, bp);

 out_brelse:
	xfs_buf_relse(bp);
 out:
	return error;
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. The first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */

/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error;

	/* push non-blocking */
	xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_BDFLUSH);
	xfs_qm_sync(mp, SYNC_BDFLUSH);
	xfs_filestream_flush(mp);

	/* push and block */
	xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_WAIT|SYNC_IOWAIT);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp, 0);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error;
}
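
/*
 * Flush and reclaim all inodes, then keep flushing the metadata buffers
 * until nothing remains dirty or pinned, so that the unmount record can
 * be written cleanly afterwards.
 */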
STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_flush_buftarg(mp->m_ddev_targp, 0);
	xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC);

	/*
	 * This loop must run at least twice. The first instance of the loop
	 * will flush most metadata but that will generate more metadata
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record.
	 */
	do {
		xfs_sync_inodes(mp, SYNC_ATTR|SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_fs_cmn_err(CE_WARN, mp,
				"xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount *mp,
	void		*data,
	void		(*syncer)(struct xfs_mount *, void *),
	struct completion *completion)
{
	struct xfs_sync_work *work;

	work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	work->w_completion = completion;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations. At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inodes_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK);
	xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK | SYNC_IOWAIT);
	iput(inode);
}
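
/*
 * Hand the data flush off to xfssyncd and wait for it to finish, then
 * force the log synchronously. Running the flush in xfssyncd context
 * keeps its deep stack off the (typically ENOSPC) caller's path.
 */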
void
xfs_flush_inodes(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);
	DECLARE_COMPLETION_ONSTACK(completion);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
	wait_for_completion(&completion);
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

/*
 * Every sync period we need to unpin all items, reclaim inodes, sync
 * quota and write out the superblock. We might need to cover the log
 * to indicate it is idle.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
		xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
		/* dgc: errors ignored here */
		error = xfs_qm_sync(mp, SYNC_BDFLUSH);
		error = xfs_sync_fsdata(mp, SYNC_BDFLUSH);
		if (xfs_log_need_covered(mp))
			error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE);
	}
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}
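
/*
 * The per-mount syncd thread: sleep for the sync interval, then run
 * whatever work items have been queued, falling back to the periodic
 * xfs_sync_worker when woken with nothing else on the list.
 */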
STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	xfs_sync_work_t		*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			if (work->w_completion)
				complete(work->w_completion);
			kmem_free(work);
		}
	}

	return 0;
}
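
/*
 * Start the xfssyncd thread for this mount, with the periodic sync
 * worker installed as its default work item.
 */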
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_work.w_completion = NULL;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}

void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}
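
/*
 * Flush a dirty inode and free it once nothing else references it.
 * Returns nonzero without doing anything if the inode is already being
 * reclaimed by someone else.
 */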
int
xfs_reclaim_inode(
	xfs_inode_t	*ip,
	int		locked,
	int		sync_mode)
{
	xfs_perag_t	*pag = xfs_get_perag(ip->i_mount, ip->i_ino);

	/* The hash lock here protects a thread in xfs_iget_core from
	 * racing with us on linking the inode back with a vnode.
	 * Once we have the XFS_IRECLAIM flag set it will not touch
	 * us.
	 */
	write_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
	    !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
		spin_unlock(&ip->i_flags_lock);
		write_unlock(&pag->pag_ici_lock);
		if (locked) {
			xfs_ifunlock(ip);
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(ip->i_mount, pag);

	/*
	 * If the inode is still dirty, then flush it out. If the inode
	 * is not in the AIL, then it will be OK to flush it delwri as
	 * long as xfs_iflush() does not keep any references to the inode.
	 * We leave that decision up to xfs_iflush() since it has the
	 * knowledge of whether it's OK to simply do a delwri flush of
	 * the inode or whether we need to wait until the inode is
	 * pulled from the AIL.
	 * We get the flush lock regardless, though, just to make sure
	 * we don't free it while it is being flushed.
	 */
	if (!locked) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_iflock(ip);
	}

	/*
	 * In the case of a forced shutdown we rely on xfs_iflush() to
	 * wait for the inode to be unpinned before returning an error.
	 */
	if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
		/* synchronize with xfs_iflush_done */
		xfs_iflock(ip);
		xfs_ifunlock(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_ireclaim(ip);
	return 0;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_perag_t	*pag = xfs_get_perag(mp, ip->i_ino);

	read_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	radix_tree_tag_set(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);
}
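
/*
 * Clear the reclaim tag with the radix tree and flag locks already
 * held, as xfs_inode_clear_reclaim_tag() below arranges.
 */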
void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
}

void
xfs_inode_clear_reclaim_tag(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_perag_t	*pag = xfs_get_perag(mp, ip->i_ino);

	read_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_clear_reclaim_tag(mp, pag, ip);
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);
}
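
/*
 * Walk the inodes tagged XFS_ICI_RECLAIM_TAG in this AG and try to
 * reclaim each one, restarting the walk after a short delay if any
 * had to be skipped.
 */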
STATIC void
xfs_reclaim_inodes_ag(
	xfs_mount_t	*mp,
	int		ag,
	int		noblock,
	int		mode)
{
	xfs_inode_t	*ip = NULL;
	xfs_perag_t	*pag = &mp->m_perag[ag];
	int		nr_found;
	uint32_t	first_index;
	int		skipped;

restart:
	first_index = 0;
	skipped = 0;
	do {
		/*
		 * use a gang lookup to find the next inode in the tree
		 * as the tree is sparse and a gang lookup walks to find
		 * the number of objects requested.
		 */
		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
					(void**)&ip, first_index, 1,
					XFS_ICI_RECLAIM_TAG);

		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/*
		 * Update the index for the next lookup. Catch overflows
		 * into the next AG range which can occur if we have inodes
		 * in the last block of the AG and we are currently
		 * pointing to the last inode.
		 */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/* ignore if already under reclaim */
		if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}

		if (noblock) {
			if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
				read_unlock(&pag->pag_ici_lock);
				continue;
			}
			if (xfs_ipincount(ip) ||
			    !xfs_iflock_nowait(ip)) {
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
				read_unlock(&pag->pag_ici_lock);
				continue;
			}
		}
		read_unlock(&pag->pag_ici_lock);

		/*
		 * hmmm - this is an inode already in reclaim. Do
		 * we even bother catching it here?
		 */
		if (xfs_reclaim_inode(ip, noblock, mode))
			skipped++;
	} while (nr_found);

	if (skipped) {
		delay(1);
		goto restart;
	}
}
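
/*
 * Reclaim the tagged inodes in every initialised AG.
 */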
int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		noblock,
	int		mode)
{
	int		i;

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		if (!mp->m_perag[i].pag_ici_init)
			continue;
		xfs_reclaim_inodes_ag(mp, i, noblock, mode);
	}
	return 0;
}