xfs: fix endian warning in xlog_recover_get_buf_lsn()
index 7cf5e4eafe28b1a05890d63e5c1d706d964eef19..1728c7c016a6783b3ec0a835ba843bc746f69129 100644
@@ -17,7 +17,7 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_inum.h"
 #include "xfs_extfree_item.h"
 #include "xfs_trans_priv.h"
 #include "xfs_quota.h"
-#include "xfs_utils.h"
 #include "xfs_cksum.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_icreate_item.h"
 
 /* Need all the magic numbers and buffer ops structures from these headers */
 #include "xfs_symlink.h"
 #include "xfs_da_btree.h"
 #include "xfs_dir2_format.h"
-#include "xfs_dir2_priv.h"
+#include "xfs_dir2.h"
 #include "xfs_attr_leaf.h"
 #include "xfs_attr_remote.h"
 
+#define BLK_AVG(blk1, blk2)    (((blk1) + (blk2)) >> 1)
+
 STATIC int
 xlog_find_zeroed(
        struct xlog     *,
@@ -606,7 +608,7 @@ out:
 
 /*
  * Head is defined to be the point of the log where the next log write
- * write could go.  This means that incomplete LR writes at the end are
+ * could go.  This means that incomplete LR writes at the end are
  * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
  * current cycle number -1 won't be present in the log if we start writing
@@ -962,6 +964,7 @@ xlog_find_tail(
        }
        if (!found) {
                xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
+               xlog_put_bp(bp);
                ASSERT(0);
                return XFS_ERROR(EIO);
        }
@@ -1143,7 +1146,8 @@ xlog_find_zeroed(
                 */
                xfs_warn(log->l_mp,
                        "Log inconsistent or not a log (last==0, first!=1)");
-               return XFS_ERROR(EINVAL);
+               error = XFS_ERROR(EINVAL);
+               goto bp_err;
        }
 
        /* we have a partially zeroed log */
@@ -1617,7 +1621,10 @@ xlog_recover_add_to_trans(
 *        from the cancelled buffer table. Hence they have to be done last.
  *
  *     3. Inode allocation buffers must be replayed before inode items that
- *        read the buffer and replay changes into it.
+ *        read the buffer and replay changes into it. For filesystems using the
+ *        ICREATE transactions, this means XFS_LI_ICREATE objects need to get
+ *        treated the same as inode allocation buffers as they create and
+ *        initialise the buffers directly.
  *
  *     4. Inode unlink buffers must be replayed after inode items are replayed.
  *        This ensures that inodes are completely flushed to the inode buffer
@@ -1632,10 +1639,17 @@ xlog_recover_add_to_trans(
  * from all the other buffers and move them to last.
  *
  * Hence, 4 lists, in order from head to tail:
- *     - buffer_list for all buffers except cancelled/inode unlink buffers
- *     - item_list for all non-buffer items
- *     - inode_buffer_list for inode unlink buffers
- *     - cancel_list for the cancelled buffers
+ *     - buffer_list for all buffers except cancelled/inode unlink buffers
+ *     - item_list for all non-buffer items
+ *     - inode_buffer_list for inode unlink buffers
+ *     - cancel_list for the cancelled buffers
+ *
+ * Note that we add objects to the tail of the lists so that first-to-last
+ * ordering is preserved within the lists. Adding objects to the head of the
+ * list means when we traverse from the head we walk them in last-to-first
+ * order. For cancelled buffers and inode unlink buffers this doesn't matter,
+ * but for all other items there may be specific ordering that we need to
+ * preserve.
  */
 STATIC int
 xlog_recover_reorder_trans(
@@ -1655,6 +1669,9 @@ xlog_recover_reorder_trans(
                xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;
 
                switch (ITEM_TYPE(item)) {
+               case XFS_LI_ICREATE:
+                       list_move_tail(&item->ri_list, &buffer_list);
+                       break;
                case XFS_LI_BUF:
                        if (buf_f->blf_flags & XFS_BLF_CANCEL) {
                                trace_xfs_log_recover_item_reorder_head(log,
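For reference, a minimal userspace sketch (hypothetical, not kernel code) of the ordering rule the comment above describes: with a circular doubly-linked list, adding at the tail keeps first-to-last log order under a head-to-tail walk, while adding at the head would reverse it.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *l) { l->next = l->prev = l; }

static void __list_add(struct list_head *new, struct list_head *prev,
		       struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/* add at the tail: a head-to-tail walk sees entries first-to-last */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

struct item { int seq; struct list_head list; };

int main(void)
{
	struct item items[3] = { { 1 }, { 2 }, { 3 } };
	struct list_head head, *p;
	int i;

	list_init(&head);
	for (i = 0; i < 3; i++)
		list_add_tail(&items[i].list, &head);

	/* prints "1 2 3": log order preserved, as replay requires */
	for (p = head.next; p != &head; p = p->next)
		printf("%d ", ((struct item *)((char *)p -
				offsetof(struct item, list)))->seq);
	printf("\n");
	return 0;
}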
@@ -1752,19 +1769,11 @@ xlog_recover_buffer_pass1(
 
 /*
  * Check to see whether the buffer being recovered has a corresponding
- * entry in the buffer cancel record table.  If it does then return 1
- * so that it will be cancelled, otherwise return 0.  If the buffer is
- * actually a buffer cancel item (XFS_BLF_CANCEL is set), then decrement
- * the refcount on the entry in the table and remove it from the table
- * if this is the last reference.
- *
- * We remove the cancel record from the table when we encounter its
- * last occurrence in the log so that if the same buffer is re-used
- * again after its last cancellation we actually replay the changes
- * made at that point.
+ * entry in the buffer cancel record table. If it does, return the cancel
+ * buffer structure to the caller.
  */
-STATIC int
-xlog_check_buffer_cancelled(
+STATIC struct xfs_buf_cancel *
+xlog_peek_buffer_cancelled(
        struct xlog             *log,
        xfs_daddr_t             blkno,
        uint                    len,
@@ -1773,22 +1782,16 @@ xlog_check_buffer_cancelled(
        struct list_head        *bucket;
        struct xfs_buf_cancel   *bcp;
 
-       if (log->l_buf_cancel_table == NULL) {
-               /*
-                * There is nothing in the table built in pass one,
-                * so this buffer must not be cancelled.
-                */
+       if (!log->l_buf_cancel_table) {
+               /* empty table means no cancelled buffers in the log */
                ASSERT(!(flags & XFS_BLF_CANCEL));
-               return 0;
+               return NULL;
        }
 
-       /*
-        * Search for an entry in the  cancel table that matches our buffer.
-        */
        bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
        list_for_each_entry(bcp, bucket, bc_list) {
                if (bcp->bc_blkno == blkno && bcp->bc_len == len)
-                       goto found;
+                       return bcp;
        }
 
        /*
@@ -1796,9 +1799,32 @@ xlog_check_buffer_cancelled(
         * that the buffer is NOT cancelled.
         */
        ASSERT(!(flags & XFS_BLF_CANCEL));
-       return 0;
+       return NULL;
+}
+
+/*
+ * If the buffer is being cancelled then return 1 so that it will be cancelled,
+ * otherwise return 0.  If the buffer is actually a buffer cancel item
+ * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
+ * table and remove it from the table if this is the last reference.
+ *
+ * We remove the cancel record from the table when we encounter its last
+ * occurrence in the log so that if the same buffer is re-used again after its
+ * last cancellation we actually replay the changes made at that point.
+ */
+STATIC int
+xlog_check_buffer_cancelled(
+       struct xlog             *log,
+       xfs_daddr_t             blkno,
+       uint                    len,
+       ushort                  flags)
+{
+       struct xfs_buf_cancel   *bcp;
+
+       bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
+       if (!bcp)
+               return 0;
 
-found:
        /*
         * We've got a match, so return 1 so that the recovery of this buffer
         * is cancelled.  If this buffer is actually a buffer cancel log
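As an aside, a compact userspace model (hypothetical names, and a single flat bucket for brevity; the real code hashes blkno into l_buf_cancel_table buckets of struct xfs_buf_cancel) of the peek/check split being introduced here: peek only looks the entry up, while check additionally consumes a reference when the item is itself a cancel record, freeing the entry on the last reference.

#include <stdlib.h>

struct buf_cancel {
	long			blkno;
	unsigned int		len;
	int			refcount;
	struct buf_cancel	*next;
};

static struct buf_cancel *
peek_buffer_cancelled(struct buf_cancel *bucket, long blkno, unsigned int len)
{
	struct buf_cancel	*bcp;

	for (bcp = bucket; bcp; bcp = bcp->next)
		if (bcp->blkno == blkno && bcp->len == len)
			return bcp;
	return NULL;			/* not cancelled */
}

static int
check_buffer_cancelled(struct buf_cancel **bucket, long blkno,
		       unsigned int len, int is_cancel_item)
{
	struct buf_cancel	*bcp;

	bcp = peek_buffer_cancelled(*bucket, blkno, len);
	if (!bcp)
		return 0;		/* replay the buffer normally */

	/*
	 * A cancel item consumes one reference; on the last reference,
	 * remove the entry so later re-uses of these blocks replay.
	 */
	if (is_cancel_item && --bcp->refcount == 0) {
		struct buf_cancel **pp;

		for (pp = bucket; *pp != bcp; pp = &(*pp)->next)
			;
		*pp = bcp->next;
		free(bcp);
	}
	return 1;			/* cancel recovery of this buffer */
}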
@@ -1932,6 +1958,104 @@ xlog_recover_do_inode_buffer(
        return 0;
 }
 
+/*
+ * V5 filesystems know the age of the buffer on disk being recovered. We can
+ * have newer objects on disk than we are replaying, and so for these cases we
+ * don't want to replay the current change as that will make the buffer contents
+ * temporarily invalid on disk.
+ *
+ * The magic number might not match the buffer type we are going to recover
+ * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags.  Hence
+ * extract the LSN of the existing object in the buffer based on it's current
+ * magic number.  If we don't recognise the magic number in the buffer, then
+ * return a LSN of -1 so that the caller knows it was an unrecognised block and
+ * so can recover the buffer.
+ */
+static xfs_lsn_t
+xlog_recover_get_buf_lsn(
+       struct xfs_mount        *mp,
+       struct xfs_buf          *bp)
+{
+       __uint32_t              magic32;
+       __uint16_t              magic16;
+       __uint16_t              magicda;
+       void                    *blk = bp->b_addr;
+
+       /* v4 filesystems always recover immediately */
+       if (!xfs_sb_version_hascrc(&mp->m_sb))
+               goto recover_immediately;
+
+       magic32 = be32_to_cpu(*(__be32 *)blk);
+       switch (magic32) {
+       case XFS_ABTB_CRC_MAGIC:
+       case XFS_ABTC_CRC_MAGIC:
+       case XFS_ABTB_MAGIC:
+       case XFS_ABTC_MAGIC:
+       case XFS_IBT_CRC_MAGIC:
+       case XFS_IBT_MAGIC:
+               return be64_to_cpu(
+                               ((struct xfs_btree_block *)blk)->bb_u.s.bb_lsn);
+       case XFS_BMAP_CRC_MAGIC:
+       case XFS_BMAP_MAGIC:
+               return be64_to_cpu(
+                               ((struct xfs_btree_block *)blk)->bb_u.l.bb_lsn);
+       case XFS_AGF_MAGIC:
+               return be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
+       case XFS_AGFL_MAGIC:
+               return be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
+       case XFS_AGI_MAGIC:
+               return be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
+       case XFS_SYMLINK_MAGIC:
+               return be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
+       case XFS_DIR3_BLOCK_MAGIC:
+       case XFS_DIR3_DATA_MAGIC:
+       case XFS_DIR3_FREE_MAGIC:
+               return be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
+       case XFS_ATTR3_RMT_MAGIC:
+               return be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
+       case XFS_SB_MAGIC:
+               return be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
+       default:
+               break;
+       }
+
+       magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
+       switch (magicda) {
+       case XFS_DIR3_LEAF1_MAGIC:
+       case XFS_DIR3_LEAFN_MAGIC:
+       case XFS_DA3_NODE_MAGIC:
+               return be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
+       default:
+               break;
+       }
+
+       /*
+        * We do individual object checks on dquot and inode buffers as they
+        * have their own individual LSN records. Also, we could have a stale
+        * buffer here, so we have to at least recognise these buffer types.
+        *
+        * A noted complexity here is inode unlinked list processing - it logs
+        * the inode directly in the buffer, but we don't know which inodes have
+        * been modified, and there is no global buffer LSN. Hence we need to
+        * recover all inode buffer types immediately. This problem will be
+        * fixed by logical logging of the unlinked list modifications.
+        */
+       magic16 = be16_to_cpu(*(__be16 *)blk);
+       switch (magic16) {
+       case XFS_DQUOT_MAGIC:
+       case XFS_DINODE_MAGIC:
+               goto recover_immediately;
+       default:
+               break;
+       }
+
+       /* unknown buffer contents, recover immediately */
+
+recover_immediately:
+       return (xfs_lsn_t)-1;
+}
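The callers below (xlog_recover_buffer_pass2, and the equivalent inode and dquot checks) turn this LSN into a skip-or-replay decision with XFS_LSN_CMP. A rough userspace sketch, assuming the usual XFS encoding of an LSN as a 32-bit cycle number in the high word and a 32-bit block number in the low word:

#include <stdint.h>
#include <stdio.h>

typedef int64_t xfs_lsn_t;

#define CYCLE_LSN(lsn)	((uint32_t)((lsn) >> 32))
#define BLOCK_LSN(lsn)	((uint32_t)(lsn))

/* compare cycle numbers first, block numbers second */
static int lsn_cmp(xfs_lsn_t a, xfs_lsn_t b)
{
	if (CYCLE_LSN(a) != CYCLE_LSN(b))
		return CYCLE_LSN(a) < CYCLE_LSN(b) ? -1 : 1;
	if (BLOCK_LSN(a) != BLOCK_LSN(b))
		return BLOCK_LSN(a) < BLOCK_LSN(b) ? -1 : 1;
	return 0;
}

int main(void)
{
	xfs_lsn_t buf_lsn = ((xfs_lsn_t)5 << 32) | 100;	 /* cycle 5, block 100 */
	xfs_lsn_t trans_lsn = ((xfs_lsn_t)5 << 32) | 80; /* older transaction */

	/* mirrors: lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0 */
	if (buf_lsn && buf_lsn != -1 && lsn_cmp(buf_lsn, trans_lsn) >= 0)
		printf("skip: on-disk object is newer than this change\n");
	else
		printf("replay the change\n");
	return 0;
}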
+
 /*
  * Validate the recovered buffer is of the correct type and attach the
  * appropriate buffer operations to them for writeback. Magic numbers are in a
@@ -1941,7 +2065,7 @@ xlog_recover_do_inode_buffer(
  *     inside a struct xfs_da_blkinfo at the start of the buffer.
  */
 static void
-xlog_recovery_validate_buf_type(
+xlog_recover_validate_buf_type(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        xfs_buf_log_format_t    *buf_f)
@@ -2220,7 +2344,7 @@ xlog_recover_do_reg_buffer(
         * just avoid the verification stage for non-crc filesystems
         */
        if (xfs_sb_version_hascrc(&mp->m_sb))
-               xlog_recovery_validate_buf_type(mp, bp, buf_f);
+               xlog_recover_validate_buf_type(mp, bp, buf_f);
 }
 
 /*
@@ -2352,7 +2476,7 @@ xfs_qm_dqcheck(
 
 /*
  * Perform a dquot buffer recovery.
- * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
+ * Simple algorithm: if we have found a QUOTAOFF log item of the same type
  * (ie. USR or GRP), then just toss this buffer away; don't recover it.
  * Else, treat it as a regular buffer and do recovery.
  */
@@ -2411,20 +2535,22 @@ xlog_recover_do_dquot_buffer(
  * over the log during recovery.  During the first we build a table of
  * those buffers which have been cancelled, and during the second we
  * only replay those buffers which do not have corresponding cancel
- * records in the table.  See xlog_recover_do_buffer_pass[1,2] above
+ * records in the table.  See xlog_recover_buffer_pass[1,2] above
  * for more details on the implementation of the table of cancel records.
  */
 STATIC int
 xlog_recover_buffer_pass2(
        struct xlog                     *log,
        struct list_head                *buffer_list,
-       struct xlog_recover_item        *item)
+       struct xlog_recover_item        *item,
+       xfs_lsn_t                       current_lsn)
 {
        xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;
        xfs_mount_t             *mp = log->l_mp;
        xfs_buf_t               *bp;
        int                     error;
        uint                    buf_flags;
+       xfs_lsn_t               lsn;
 
        /*
         * In this pass we only want to recover all the buffers which have
@@ -2449,10 +2575,17 @@ xlog_recover_buffer_pass2(
        error = bp->b_error;
        if (error) {
                xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
-               xfs_buf_relse(bp);
-               return error;
+               goto out_release;
        }
 
+       /*
+        * recover the buffer only if we get an LSN from it and it's less than
+        * the lsn of the transaction we are replaying.
+        */
+       lsn = xlog_recover_get_buf_lsn(mp, bp);
+       if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
+               goto out_release;
+
        if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
                error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
        } else if (buf_f->blf_flags &
@@ -2462,7 +2595,7 @@ xlog_recover_buffer_pass2(
                xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
        }
        if (error)
-               return XFS_ERROR(error);
+               goto out_release;
 
        /*
         * Perform delayed write on the buffer.  Asynchronous writes will be
@@ -2491,6 +2624,7 @@ xlog_recover_buffer_pass2(
                xfs_buf_delwri_queue(bp, buffer_list);
        }
 
+out_release:
        xfs_buf_relse(bp);
        return error;
 }
@@ -2499,7 +2633,8 @@ STATIC int
 xlog_recover_inode_pass2(
        struct xlog                     *log,
        struct list_head                *buffer_list,
-       struct xlog_recover_item        *item)
+       struct xlog_recover_item        *item,
+       xfs_lsn_t                       current_lsn)
 {
        xfs_inode_log_format_t  *in_f;
        xfs_mount_t             *mp = log->l_mp;
@@ -2578,8 +2713,30 @@ xlog_recover_inode_pass2(
                goto error;
        }
 
-       /* Skip replay when the on disk inode is newer than the log one */
-       if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
+       /*
+        * If the inode has an LSN in it, recover the inode only if it's less
+        * than the lsn of the transaction we are replaying.
+        */
+       if (dip->di_version >= 3) {
+               xfs_lsn_t       lsn = be64_to_cpu(dip->di_lsn);
+
+               if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
+                       trace_xfs_log_recover_inode_skip(log, in_f);
+                       error = 0;
+                       goto out_release;
+               }
+       }
+
+       /*
+        * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
+        * are transactional and if ordering is necessary we can determine that
+        * more accurately by the LSN field in the V3 inode core. Don't trust
+        * the inode versions as we might be changing them here - use the
+        * superblock flag to determine whether we need to look at di_flushiter
+        * to skip replay when the on-disk inode is newer than the log one.
+        */
+       if (!xfs_sb_version_hascrc(&mp->m_sb) &&
+           dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
                /*
                 * Deal with the wrap case, DI_MAX_FLUSH is less
                 * than smaller numbers
@@ -2594,6 +2751,7 @@ xlog_recover_inode_pass2(
                        goto error;
                }
        }
+
        /* Take the opportunity to reset the flush iteration count */
        dicp->di_flushiter = 0;
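For the v1/v2 path retained above, a small sketch of the wrap test that the elided branch performs, assuming DI_MAX_FLUSH is the 16-bit maximum 0xffff: an on-disk counter pinned at the maximum is treated as older than a log counter that has already wrapped back to a small value.

#include <stdbool.h>
#include <stdint.h>

#define DI_MAX_FLUSH	0xffff

/*
 * log_iter < disk_iter normally means the on-disk inode is newer and
 * replay must be skipped -- unless the counter has just wrapped, with
 * disk pinned at the maximum while the log restarted near zero.
 */
static bool flushiter_wrapped(uint16_t disk_iter, uint16_t log_iter)
{
	return disk_iter == DI_MAX_FLUSH && log_iter < (DI_MAX_FLUSH >> 1);
}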
 
@@ -2758,6 +2916,8 @@ write_inode_buffer:
        ASSERT(bp->b_target->bt_mount == mp);
        bp->b_iodone = xlog_recover_iodone;
        xfs_buf_delwri_queue(bp, buffer_list);
+
+out_release:
        xfs_buf_relse(bp);
 error:
        if (need_free)
@@ -2799,7 +2959,8 @@ STATIC int
 xlog_recover_dquot_pass2(
        struct xlog                     *log,
        struct list_head                *buffer_list,
-       struct xlog_recover_item        *item)
+       struct xlog_recover_item        *item,
+       xfs_lsn_t                       current_lsn)
 {
        xfs_mount_t             *mp = log->l_mp;
        xfs_buf_t               *bp;
@@ -2873,6 +3034,19 @@ xlog_recover_dquot_pass2(
                return XFS_ERROR(EIO);
        }
 
+       /*
+        * If the dquot has an LSN in it, recover the dquot only if it's less
+        * than the lsn of the transaction we are replaying.
+        */
+       if (xfs_sb_version_hascrc(&mp->m_sb)) {
+               struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
+               xfs_lsn_t       lsn = be64_to_cpu(dqb->dd_lsn);
+
+               if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
+                       goto out_release;
+               }
+       }
+
        memcpy(ddq, recddq, item->ri_buf[1].i_len);
        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
@@ -2883,9 +3057,10 @@ xlog_recover_dquot_pass2(
        ASSERT(bp->b_target->bt_mount == mp);
        bp->b_iodone = xlog_recover_iodone;
        xfs_buf_delwri_queue(bp, buffer_list);
-       xfs_buf_relse(bp);
 
-       return (0);
+out_release:
+       xfs_buf_relse(bp);
+       return 0;
 }
 
 /*
@@ -2981,6 +3156,93 @@ xlog_recover_efd_pass2(
        return 0;
 }
 
+/*
+ * This routine is called when an inode create format structure is found in a
+ * committed transaction in the log.  Its purpose is to initialise the inodes
+ * being allocated on disk. This requires us to get inode cluster buffers that
+ * match the range to be initialised, stamped with inode templates and written
+ * by delayed write so that subsequent modifications will hit the cached buffer
+ * and only need writing out at the end of recovery.
+ */
+STATIC int
+xlog_recover_do_icreate_pass2(
+       struct xlog             *log,
+       struct list_head        *buffer_list,
+       xlog_recover_item_t     *item)
+{
+       struct xfs_mount        *mp = log->l_mp;
+       struct xfs_icreate_log  *icl;
+       xfs_agnumber_t          agno;
+       xfs_agblock_t           agbno;
+       unsigned int            count;
+       unsigned int            isize;
+       xfs_agblock_t           length;
+
+       icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
+       if (icl->icl_type != XFS_LI_ICREATE) {
+               xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
+               return EINVAL;
+       }
+
+       if (icl->icl_size != 1) {
+               xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
+               return EINVAL;
+       }
+
+       agno = be32_to_cpu(icl->icl_ag);
+       if (agno >= mp->m_sb.sb_agcount) {
+               xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
+               return EINVAL;
+       }
+       agbno = be32_to_cpu(icl->icl_agbno);
+       if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
+               xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
+               return EINVAL;
+       }
+       isize = be32_to_cpu(icl->icl_isize);
+       if (isize != mp->m_sb.sb_inodesize) {
+               xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
+               return EINVAL;
+       }
+       count = be32_to_cpu(icl->icl_count);
+       if (!count) {
+               xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
+               return EINVAL;
+       }
+       length = be32_to_cpu(icl->icl_length);
+       if (!length || length >= mp->m_sb.sb_agblocks) {
+               xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
+               return EINVAL;
+       }
+
+       /* existing allocation is a fixed value */
+       ASSERT(count == XFS_IALLOC_INODES(mp));
+       ASSERT(length == XFS_IALLOC_BLOCKS(mp));
+       if (count != XFS_IALLOC_INODES(mp) ||
+            length != XFS_IALLOC_BLOCKS(mp)) {
+               xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2");
+               return EINVAL;
+       }
+
+       /*
+        * Inode buffers can be freed. Do not replay the inode initialisation as
+        * we could be overwriting something written after this inode buffer was
+        * cancelled.
+        *
+        * XXX: we need to iterate all buffers and only init those that are not
+        * cancelled. I think that a more fine-grained factoring of
+        * xfs_ialloc_inode_init may be appropriate here to enable this to be
+        * done easily.
+        */
+       if (xlog_check_buffer_cancelled(log,
+                       XFS_AGB_TO_DADDR(mp, agno, agbno), length, 0))
+               return 0;
+
+       xfs_ialloc_inode_init(mp, NULL, buffer_list, agno, agbno, length,
+                                       be32_to_cpu(icl->icl_gen));
+       return 0;
+}
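The log format those checks decode looks roughly like the declaration below (per xfs_icreate_item.h of this era; treat it as a sketch, not a reference). Note the split endianness the code above relies on: icl_type and icl_size are host-endian log item header fields compared directly, while the AG geometry fields are big-endian and go through be32_to_cpu().

struct xfs_icreate_log {
	__uint16_t	icl_type;	/* type of log format structure */
	__uint16_t	icl_size;	/* size of log format structure */
	__be32		icl_ag;		/* ag we are creating inodes in */
	__be32		icl_agbno;	/* start block of inode range */
	__be32		icl_count;	/* number of inodes to initialise */
	__be32		icl_isize;	/* size of inodes */
	__be32		icl_length;	/* length of extent to initialise */
	__be32		icl_gen;	/* inode generation number to use */
};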
+
 /*
  * Free up any resources allocated by the transaction
  *
@@ -3006,6 +3268,106 @@ xlog_recover_free_trans(
        kmem_free(trans);
 }
 
+STATIC void
+xlog_recover_buffer_ra_pass2(
+       struct xlog                     *log,
+       struct xlog_recover_item        *item)
+{
+       struct xfs_buf_log_format       *buf_f = item->ri_buf[0].i_addr;
+       struct xfs_mount                *mp = log->l_mp;
+
+       if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
+                       buf_f->blf_len, buf_f->blf_flags)) {
+               return;
+       }
+
+       xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
+                               buf_f->blf_len, NULL);
+}
+
+STATIC void
+xlog_recover_inode_ra_pass2(
+       struct xlog                     *log,
+       struct xlog_recover_item        *item)
+{
+       struct xfs_inode_log_format     ilf_buf;
+       struct xfs_inode_log_format     *ilfp;
+       struct xfs_mount                *mp = log->l_mp;
+       int                     error;
+
+       if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
+               ilfp = item->ri_buf[0].i_addr;
+       } else {
+               ilfp = &ilf_buf;
+               memset(ilfp, 0, sizeof(*ilfp));
+               error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
+               if (error)
+                       return;
+       }
+
+       if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
+               return;
+
+       xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
+                               ilfp->ilf_len, &xfs_inode_buf_ra_ops);
+}
+
+STATIC void
+xlog_recover_dquot_ra_pass2(
+       struct xlog                     *log,
+       struct xlog_recover_item        *item)
+{
+       struct xfs_mount        *mp = log->l_mp;
+       struct xfs_disk_dquot   *recddq;
+       struct xfs_dq_logformat *dq_f;
+       uint                    type;
+
+       if (mp->m_qflags == 0)
+               return;
+
+       recddq = item->ri_buf[1].i_addr;
+       if (recddq == NULL)
+               return;
+       if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
+               return;
+
+       type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
+       ASSERT(type);
+       if (log->l_quotaoffs_flag & type)
+               return;
+
+       dq_f = item->ri_buf[0].i_addr;
+       ASSERT(dq_f);
+       ASSERT(dq_f->qlf_len == 1);
+
+       xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno,
+                         XFS_FSB_TO_BB(mp, dq_f->qlf_len), NULL);
+}
+
+STATIC void
+xlog_recover_ra_pass2(
+       struct xlog                     *log,
+       struct xlog_recover_item        *item)
+{
+       switch (ITEM_TYPE(item)) {
+       case XFS_LI_BUF:
+               xlog_recover_buffer_ra_pass2(log, item);
+               break;
+       case XFS_LI_INODE:
+               xlog_recover_inode_ra_pass2(log, item);
+               break;
+       case XFS_LI_DQUOT:
+               xlog_recover_dquot_ra_pass2(log, item);
+               break;
+       case XFS_LI_EFI:
+       case XFS_LI_EFD:
+       case XFS_LI_QUOTAOFF:
+       default:
+               break;
+       }
+}
+
 STATIC int
 xlog_recover_commit_pass1(
        struct xlog                     *log,
@@ -3023,6 +3385,7 @@ xlog_recover_commit_pass1(
        case XFS_LI_EFI:
        case XFS_LI_EFD:
        case XFS_LI_DQUOT:
+       case XFS_LI_ICREATE:
                /* nothing to do in pass 1 */
                return 0;
        default:
@@ -3044,15 +3407,20 @@ xlog_recover_commit_pass2(
 
        switch (ITEM_TYPE(item)) {
        case XFS_LI_BUF:
-               return xlog_recover_buffer_pass2(log, buffer_list, item);
+               return xlog_recover_buffer_pass2(log, buffer_list, item,
+                                                trans->r_lsn);
        case XFS_LI_INODE:
-               return xlog_recover_inode_pass2(log, buffer_list, item);
+               return xlog_recover_inode_pass2(log, buffer_list, item,
+                                                trans->r_lsn);
        case XFS_LI_EFI:
                return xlog_recover_efi_pass2(log, item, trans->r_lsn);
        case XFS_LI_EFD:
                return xlog_recover_efd_pass2(log, item);
        case XFS_LI_DQUOT:
-               return xlog_recover_dquot_pass2(log, buffer_list, item);
+               return xlog_recover_dquot_pass2(log, buffer_list, item,
+                                               trans->r_lsn);
+       case XFS_LI_ICREATE:
+               return xlog_recover_do_icreate_pass2(log, buffer_list, item);
        case XFS_LI_QUOTAOFF:
                /* nothing to do in pass2 */
                return 0;
@@ -3064,6 +3432,26 @@ xlog_recover_commit_pass2(
        }
 }
 
+STATIC int
+xlog_recover_items_pass2(
+       struct xlog                     *log,
+       struct xlog_recover             *trans,
+       struct list_head                *buffer_list,
+       struct list_head                *item_list)
+{
+       struct xlog_recover_item        *item;
+       int                             error = 0;
+
+       list_for_each_entry(item, item_list, ri_list) {
+               error = xlog_recover_commit_pass2(log, trans,
+                                         buffer_list, item);
+               if (error)
+                       return error;
+       }
+
+       return error;
+}
+
 /*
  * Perform the transaction.
  *
@@ -3076,9 +3464,16 @@ xlog_recover_commit_trans(
        struct xlog_recover     *trans,
        int                     pass)
 {
-       int                     error = 0, error2;
-       xlog_recover_item_t     *item;
-       LIST_HEAD               (buffer_list);
+       int                             error = 0;
+       int                             error2;
+       int                             items_queued = 0;
+       struct xlog_recover_item        *item;
+       struct xlog_recover_item        *next;
+       LIST_HEAD                       (buffer_list);
+       LIST_HEAD                       (ra_list);
+       LIST_HEAD                       (done_list);
+
+       #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
 
        hlist_del(&trans->r_list);
 
@@ -3086,14 +3481,22 @@ xlog_recover_commit_trans(
        if (error)
                return error;
 
-       list_for_each_entry(item, &trans->r_itemq, ri_list) {
+       list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
                switch (pass) {
                case XLOG_RECOVER_PASS1:
                        error = xlog_recover_commit_pass1(log, trans, item);
                        break;
                case XLOG_RECOVER_PASS2:
-                       error = xlog_recover_commit_pass2(log, trans,
-                                                         &buffer_list, item);
+                       xlog_recover_ra_pass2(log, item);
+                       list_move_tail(&item->ri_list, &ra_list);
+                       items_queued++;
+                       if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
+                               error = xlog_recover_items_pass2(log, trans,
+                                               &buffer_list, &ra_list);
+                               list_splice_tail_init(&ra_list, &done_list);
+                               items_queued = 0;
+                       }
+
                        break;
                default:
                        ASSERT(0);
@@ -3103,9 +3506,19 @@ xlog_recover_commit_trans(
                        goto out;
        }
 
+out:
+       if (!list_empty(&ra_list)) {
+               if (!error)
+                       error = xlog_recover_items_pass2(log, trans,
+                                       &buffer_list, &ra_list);
+               list_splice_tail_init(&ra_list, &done_list);
+       }
+
+       if (!list_empty(&done_list))
+               list_splice_init(&done_list, &trans->r_itemq);
+
        xlog_recover_free_trans(trans);
 
-out:
        error2 = xfs_buf_delwri_submit(&buffer_list);
        return error ? error : error2;
 }
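The queue-and-flush shape added above is a general pipelining pattern: issue asynchronous readahead for a bounded window of items so the I/O overlaps with replay, then process the window synchronously before queueing more. A minimal standalone sketch, with hypothetical helpers do_readahead() and do_replay() standing in for xlog_recover_ra_pass2() and xlog_recover_items_pass2():

struct ritem { int id; };

static void do_readahead(struct ritem *it) { (void)it; }	/* async I/O hint */
static int do_replay(struct ritem *it) { (void)it; return 0; }

#define QUEUE_MAX	100	/* plays the role of XLOG_RECOVER_COMMIT_QUEUE_MAX */

static int replay_all(struct ritem *items, int nitems)
{
	int	queued = 0;
	int	start = 0;
	int	i, error = 0;

	for (i = 0; i < nitems; i++) {
		do_readahead(&items[i]);	/* overlaps with replay below */
		if (++queued >= QUEUE_MAX) {
			for (; start <= i; start++) {
				error = do_replay(&items[start]);
				if (error)
					return error;
			}
			queued = 0;
		}
	}

	/* drain the final partial window */
	for (; start < nitems; start++) {
		error = do_replay(&items[start]);
		if (error)
			break;
	}
	return error;
}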
@@ -3263,7 +3676,7 @@ xlog_recover_process_efi(
        }
 
        tp = xfs_trans_alloc(mp, 0);
-       error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
        if (error)
                goto abort_error;
        efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
@@ -3369,8 +3782,7 @@ xlog_recover_clear_agi_bucket(
        int             error;
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
-       error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
-                                 0, 0, 0);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0);
        if (error)
                goto out_abort;