Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux...
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index c6a872a8a46862948e93c343cdd0c7479caf3883..34cd83184c4ad2ff7ce85bb13fea48dfb61198b4 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -62,7 +62,7 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
        WARN_ON(atomic_read(&transaction->use_count) == 0);
        if (atomic_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
-               WARN_ON(transaction->delayed_refs.root.rb_node);
+               WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
                while (!list_empty(&transaction->pending_chunks)) {
                        struct extent_map *em;
 
@@ -183,8 +183,8 @@ loop:
        atomic_set(&cur_trans->use_count, 2);
        cur_trans->start_time = get_seconds();
 
-       cur_trans->delayed_refs.root = RB_ROOT;
-       cur_trans->delayed_refs.num_entries = 0;
+       cur_trans->delayed_refs.href_root = RB_ROOT;
+       atomic_set(&cur_trans->delayed_refs.num_entries, 0);
        cur_trans->delayed_refs.num_heads_ready = 0;
        cur_trans->delayed_refs.num_heads = 0;
        cur_trans->delayed_refs.flushing = 0;
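The two hunks above replace the flat delayed_refs.root rbtree with href_root (keyed by ref heads) and make num_entries atomic; the emptiness assertion switches from peeking at the raw rb_node pointer to the RB_EMPTY_ROOT() predicate. A minimal userspace sketch, using simplified stand-ins for the <linux/rbtree.h> types and macros rather than the real kernel headers, of why both forms assert the same "tree is empty" condition:

#include <assert.h>
#include <stddef.h>

/* simplified stand-ins for the kernel's rbtree root type and helpers */
struct rb_node { struct rb_node *rb_left, *rb_right; };
struct rb_root { struct rb_node *rb_node; };

#define RB_ROOT (struct rb_root) { NULL, }
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)

int main(void)
{
        struct rb_root href_root = RB_ROOT;

        /* old assertion style: inspect the raw pointer */
        assert(href_root.rb_node == NULL);
        /* new assertion style: the named predicate, same condition */
        assert(RB_EMPTY_ROOT(&href_root));
        return 0;
}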
@@ -196,17 +196,14 @@ loop:
         */
        smp_mb();
        if (!list_empty(&fs_info->tree_mod_seq_list))
-               WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
+               WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
                        "creating a fresh transaction\n");
        if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
-               WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
+               WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
                        "creating a fresh transaction\n");
        atomic64_set(&fs_info->tree_mod_seq, 0);
 
        spin_lock_init(&cur_trans->delayed_refs.lock);
-       atomic_set(&cur_trans->delayed_refs.procs_running_refs, 0);
-       atomic_set(&cur_trans->delayed_refs.ref_seq, 0);
-       init_waitqueue_head(&cur_trans->delayed_refs.wait);
 
        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        INIT_LIST_HEAD(&cur_trans->ordered_operations);
@@ -472,6 +469,7 @@ again:
        h->type = type;
        h->allocating_chunk = false;
        h->reloc_reserved = false;
+       h->sync = false;
        INIT_LIST_HEAD(&h->qgroup_ref_list);
        INIT_LIST_HEAD(&h->new_bgs);
 
@@ -647,7 +645,7 @@ static int should_end_transaction(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
 {
        if (root->fs_info->global_block_rsv.space_info->full &&
-           btrfs_should_throttle_delayed_refs(trans, root))
+           btrfs_check_space_for_delayed_refs(trans, root))
                return 1;
 
        return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
@@ -711,8 +709,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                btrfs_create_pending_block_groups(trans, root);
 
        trans->delayed_ref_updates = 0;
-       if (btrfs_should_throttle_delayed_refs(trans, root)) {
-               cur = max_t(unsigned long, cur, 1);
+       if (!trans->sync && btrfs_should_throttle_delayed_refs(trans, root)) {
+               cur = max_t(unsigned long, cur, 32);
                trans->delayed_ref_updates = 0;
                btrfs_run_delayed_refs(trans, root, cur);
        }
@@ -788,12 +786,6 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
        return __btrfs_end_transaction(trans, root, 1);
 }
 
-int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
-                               struct btrfs_root *root)
-{
-       return __btrfs_end_transaction(trans, root, 1);
-}
-
 /*
  * when btree blocks are allocated, they have some corresponding bits set for
  * them in one of two extent_io trees.  This is used to make sure all of
@@ -1105,7 +1097,7 @@ int btrfs_defrag_root(struct btrfs_root *root)
                        break;
 
                if (btrfs_defrag_cancelled(root->fs_info)) {
-                       printk(KERN_DEBUG "btrfs: defrag_root cancelled\n");
+                       pr_debug("BTRFS: defrag_root cancelled\n");
                        ret = -EAGAIN;
                        break;
                }
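Beyond the "btrfs:" to "BTRFS:" prefix change, the defrag hunk converts printk(KERN_DEBUG ...) to pr_debug(), which is compiled out (or gated by dynamic debug) instead of always being emitted. A rough userspace analogue, with pr_debug_demo() as a hypothetical stand-in for the kernel macro:

#include <stdio.h>

/*
 * Hypothetical stand-in for pr_debug(): a no-op unless debugging is
 * compiled in (the kernel can also enable such call sites at runtime
 * via CONFIG_DYNAMIC_DEBUG).
 */
#ifdef DEBUG
#define pr_debug_demo(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
#else
#define pr_debug_demo(fmt, ...) do { } while (0)
#endif

int main(void)
{
        pr_debug_demo("BTRFS: defrag_root cancelled\n"); /* printed only when built with -DDEBUG */
        return 0;
}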
@@ -1746,6 +1738,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                goto cleanup_transaction;
 
        btrfs_wait_delalloc_flush(root->fs_info);
+
+       btrfs_scrub_pause(root);
        /*
         * Ok now we need to make sure to block out any other joins while we
         * commit the transaction.  We could have started a join before setting
@@ -1810,7 +1804,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        WARN_ON(cur_trans != trans->transaction);
 
-       btrfs_scrub_pause(root);
        /* btrfs_commit_tree_roots is responsible for getting the
         * various roots consistent with each other.  Every pointer
         * in the tree of tree roots has to point to the most up to date
@@ -1833,6 +1826,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                goto cleanup_transaction;
        }
 
+       /*
+        * Since the transaction is done, we should set the inode map cache flag
+        * before any other transaction starts.
+        */
+       if (btrfs_test_opt(root, CHANGE_INODE_CACHE))
+               btrfs_set_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
+       else
+               btrfs_clear_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
+
        /* commit_fs_roots gets rid of all the tree log roots, it is now
         * safe to free the root of tree log roots
         */
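For the inode-cache hunk above: btrfs_test_opt(), btrfs_set_opt() and btrfs_clear_opt() are bit operations on the fs_info->mount_opt flags word, so the new code simply propagates the CHANGE_INODE_CACHE setting into INODE_MAP_CACHE once the transaction is done. A minimal sketch of that test/set/clear flag pattern, using hypothetical flag names and helpers rather than the real BTRFS_MOUNT_* definitions:

#include <assert.h>

/* hypothetical stand-ins for two BTRFS_MOUNT_* bits */
#define OPT_CHANGE_INODE_CACHE (1UL << 0)
#define OPT_INODE_MAP_CACHE    (1UL << 1)

#define opt_test(o, f)  ((o) & (f))
#define opt_set(o, f)   ((o) |= (f))
#define opt_clear(o, f) ((o) &= ~(f))

int main(void)
{
        unsigned long mount_opt = OPT_CHANGE_INODE_CACHE;

        /* mirror the hunk: copy one option's state into the other */
        if (opt_test(mount_opt, OPT_CHANGE_INODE_CACHE))
                opt_set(mount_opt, OPT_INODE_MAP_CACHE);
        else
                opt_clear(mount_opt, OPT_INODE_MAP_CACHE);

        assert(opt_test(mount_opt, OPT_INODE_MAP_CACHE));
        return 0;
}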
@@ -1975,10 +1977,23 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
        }
        root = list_first_entry(&fs_info->dead_roots,
                        struct btrfs_root, root_list);
+       /*
+        * Make sure the root is not involved in send;
+        * if the first root is busy, return directly
+        * rather than continuing with the next one.
+        */
+       spin_lock(&root->root_item_lock);
+       if (root->send_in_progress) {
+               spin_unlock(&fs_info->trans_lock);
+               spin_unlock(&root->root_item_lock);
+               return 0;
+       }
+       spin_unlock(&root->root_item_lock);
+
        list_del_init(&root->root_list);
        spin_unlock(&fs_info->trans_lock);
 
-       pr_debug("btrfs: cleaner removing %llu\n", root->objectid);
+       pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);
 
        btrfs_kill_all_delayed_nodes(root);