Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index e5fe801659ba8389b0c912baed19e39ea15655c4..34cd83184c4ad2ff7ce85bb13fea48dfb61198b4 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -62,7 +62,6 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
        WARN_ON(atomic_read(&transaction->use_count) == 0);
        if (atomic_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
-               WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.root));
                WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
                while (!list_empty(&transaction->pending_chunks)) {
                        struct extent_map *em;
@@ -184,9 +183,8 @@ loop:
        atomic_set(&cur_trans->use_count, 2);
        cur_trans->start_time = get_seconds();
 
-       cur_trans->delayed_refs.root = RB_ROOT;
        cur_trans->delayed_refs.href_root = RB_ROOT;
-       cur_trans->delayed_refs.num_entries = 0;
+       atomic_set(&cur_trans->delayed_refs.num_entries, 0);
        cur_trans->delayed_refs.num_heads_ready = 0;
        cur_trans->delayed_refs.num_heads = 0;
        cur_trans->delayed_refs.flushing = 0;
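These first two hunks drop the flat per-ref rbtree (delayed_refs.root), leaving only the per-bytenr head tree (href_root), and turn num_entries into an atomic_t so throttling heuristics can sample it without taking delayed_refs.lock. Below is a minimal userspace sketch of that counter pattern, using C11 atomics in place of the kernel's atomic_t; the struct and helper names are illustrative, not the btrfs API.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for the delayed-ref bookkeeping, not the btrfs structs. */
struct delayed_ref_counter {
        atomic_long num_entries;
};

static void ref_added(struct delayed_ref_counter *c)
{
        /* Writers still run under the delayed-refs spinlock in the kernel;
         * making the counter atomic is what lets readers skip that lock. */
        atomic_fetch_add_explicit(&c->num_entries, 1, memory_order_relaxed);
}

static long refs_pending(struct delayed_ref_counter *c)
{
        /* Heuristic readers (e.g. throttling checks) can peek without the lock. */
        return atomic_load_explicit(&c->num_entries, memory_order_relaxed);
}

int main(void)
{
        struct delayed_ref_counter c;

        atomic_init(&c.num_entries, 0);
        ref_added(&c);
        ref_added(&c);
        printf("pending delayed ref updates: %ld\n", refs_pending(&c));
        return 0;
}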
@@ -206,9 +204,6 @@ loop:
        atomic64_set(&fs_info->tree_mod_seq, 0);
 
        spin_lock_init(&cur_trans->delayed_refs.lock);
-       atomic_set(&cur_trans->delayed_refs.procs_running_refs, 0);
-       atomic_set(&cur_trans->delayed_refs.ref_seq, 0);
-       init_waitqueue_head(&cur_trans->delayed_refs.wait);
 
        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        INIT_LIST_HEAD(&cur_trans->ordered_operations);
@@ -474,6 +469,7 @@ again:
        h->type = type;
        h->allocating_chunk = false;
        h->reloc_reserved = false;
+       h->sync = false;
        INIT_LIST_HEAD(&h->qgroup_ref_list);
        INIT_LIST_HEAD(&h->new_bgs);
 
@@ -649,7 +645,7 @@ static int should_end_transaction(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
 {
        if (root->fs_info->global_block_rsv.space_info->full &&
-           btrfs_should_throttle_delayed_refs(trans, root))
+           btrfs_check_space_for_delayed_refs(trans, root))
                return 1;
 
        return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
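Here should_end_transaction stops asking whether the caller ought to throttle and run delayed refs inline, and instead asks btrfs_check_space_for_delayed_refs whether the reserve can still absorb the pending ref updates. Roughly, the old predicate is about backlog size and the new one about reserved space; the sketch below contrasts the two shapes with made-up fields and thresholds, not the real btrfs accounting.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical state and thresholds, only to show the shape of the two checks. */
struct refs_state {
        long num_entries;        /* pending delayed ref updates               */
        long bytes_per_ref;      /* metadata one ref update may dirty         */
        long global_rsv_free;    /* bytes left in the global block reserve    */
};

/* Old check: "is the backlog so large that this task should stop and help?" */
static bool should_throttle(const struct refs_state *s)
{
        return s->num_entries > 64;
}

/* New check: "would running the pending refs overrun the reserved space?" */
static bool space_is_tight(const struct refs_state *s)
{
        return s->num_entries * s->bytes_per_ref > s->global_rsv_free;
}

int main(void)
{
        struct refs_state s = { .num_entries = 10, .bytes_per_ref = 16384,
                                .global_rsv_free = 1 << 20 };

        printf("throttle: %d, space tight: %d\n",
               should_throttle(&s), space_is_tight(&s));
        return 0;
}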
@@ -713,8 +709,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                btrfs_create_pending_block_groups(trans, root);
 
        trans->delayed_ref_updates = 0;
-       if (btrfs_should_throttle_delayed_refs(trans, root)) {
-               cur = max_t(unsigned long, cur, 1);
+       if (!trans->sync && btrfs_should_throttle_delayed_refs(trans, root)) {
+               cur = max_t(unsigned long, cur, 32);
                trans->delayed_ref_updates = 0;
                btrfs_run_delayed_refs(trans, root, cur);
        }
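Together with the h->sync handle flag initialized above, this hunk makes __btrfs_end_transaction skip inline delayed-ref processing for sync (fsync-path) handles, and when a non-sync handle does help out it now runs a batch of at least 32 entries instead of at least 1. A small sketch of that gating logic, with userspace stand-ins rather than the btrfs functions:

#include <stdbool.h>
#include <stdio.h>

struct handle {
        bool sync;                         /* fsync path: never throttle inline */
        unsigned long delayed_ref_updates;
};

/* Pretend to process up to 'count' pending ref updates. */
static void run_delayed_refs(unsigned long count)
{
        printf("running up to %lu delayed refs\n", count);
}

static void end_transaction(struct handle *h, bool backlog_too_large)
{
        unsigned long cur = h->delayed_ref_updates;

        h->delayed_ref_updates = 0;
        if (!h->sync && backlog_too_large) {
                /* Batch of at least 32 so the inline work is worth the latency hit. */
                if (cur < 32)
                        cur = 32;
                run_delayed_refs(cur);
        }
}

int main(void)
{
        struct handle fsync_h = { .sync = true,  .delayed_ref_updates = 100 };
        struct handle norm_h  = { .sync = false, .delayed_ref_updates = 5   };

        end_transaction(&fsync_h, true);   /* skipped: sync handles stay low-latency */
        end_transaction(&norm_h,  true);   /* runs a batch of at least 32 refs */
        return 0;
}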
@@ -1830,6 +1826,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                goto cleanup_transaction;
        }
 
+       /*
+        * Since the transaction is done, we should set the inode map cache flag
+        * before any other coming transaction.
+        */
+       if (btrfs_test_opt(root, CHANGE_INODE_CACHE))
+               btrfs_set_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
+       else
+               btrfs_clear_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
+
        /* commit_fs_roots gets rid of all the tree log roots, it is now
         * safe to free the root of tree log roots
         */
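The new block latches the inode_cache mount option into effect only at commit time: a remount records the request as CHANGE_INODE_CACHE, and the commit flips INODE_MAP_CACHE so every root observes the change on a clean transaction boundary. A small sketch of that two-flag latch using plain bit operations; the option names mirror the patch, but the helpers and layout are stand-ins, not the btrfs_set_opt/btrfs_test_opt macros.

#include <stdio.h>

#define OPT_INODE_MAP_CACHE    (1UL << 0)   /* cache is actually in use            */
#define OPT_CHANGE_INODE_CACHE (1UL << 1)   /* user asked to toggle it at remount  */

static unsigned long mount_opt;

static void remount_inode_cache(int enable)
{
        /* Remount only records the request ... */
        if (enable)
                mount_opt |= OPT_CHANGE_INODE_CACHE;
        else
                mount_opt &= ~OPT_CHANGE_INODE_CACHE;
}

static void commit_transaction(void)
{
        /* ... and commit applies it, so the switch lands on a transaction boundary. */
        if (mount_opt & OPT_CHANGE_INODE_CACHE)
                mount_opt |= OPT_INODE_MAP_CACHE;
        else
                mount_opt &= ~OPT_INODE_MAP_CACHE;
}

int main(void)
{
        remount_inode_cache(1);
        commit_transaction();
        printf("inode map cache %s\n",
               (mount_opt & OPT_INODE_MAP_CACHE) ? "enabled" : "disabled");
        return 0;
}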
@@ -1972,6 +1977,19 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
        }
        root = list_first_entry(&fs_info->dead_roots,
                        struct btrfs_root, root_list);
+       /*
+        * Make sure the root is not involved in a send;
+        * if the first root is busy we return directly
+        * rather than continuing with the next one.
+        */
+       spin_lock(&root->root_item_lock);
+       if (root->send_in_progress) {
+               spin_unlock(&fs_info->trans_lock);
+               spin_unlock(&root->root_item_lock);
+               return 0;
+       }
+       spin_unlock(&root->root_item_lock);
+
        list_del_init(&root->root_list);
        spin_unlock(&fs_info->trans_lock);
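This last hunk keeps a dead root from being torn down while a send is still reading it: send_in_progress is checked under root_item_lock, and on a hit both locks are dropped and the function returns instead of moving on to the next entry in dead_roots. A hedged userspace sketch of that check-under-lock-then-bail pattern, with pthread mutexes standing in for the kernel spinlocks and simplified lock nesting:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct root {
        pthread_mutex_t root_item_lock;
        int send_in_progress;           /* bumped by send while it reads the root */
        bool deleted;
};

static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns true if the root was handed off for cleanup, false if send owns it. */
static bool try_clean_dead_root(struct root *root)
{
        pthread_mutex_lock(&trans_lock);

        pthread_mutex_lock(&root->root_item_lock);
        if (root->send_in_progress) {
                /* Bail out completely rather than skipping to the next root. */
                pthread_mutex_unlock(&root->root_item_lock);
                pthread_mutex_unlock(&trans_lock);
                return false;
        }
        pthread_mutex_unlock(&root->root_item_lock);

        root->deleted = true;           /* stands in for list_del_init + real cleanup */
        pthread_mutex_unlock(&trans_lock);
        return true;
}

int main(void)
{
        struct root r = {
                .root_item_lock   = PTHREAD_MUTEX_INITIALIZER,
                .send_in_progress = 1,  /* pretend a send is running */
        };

        printf(try_clean_dead_root(&r) ? "cleaned\n" : "busy with send, skipped\n");
        return 0;
}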