Merge branch 'topic/misc' into for-linus

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 01c1f08b976ab81967792866b3ed85c74ce730b2..930ae8949737313a9cabfaa3de787c844bdecdf6 100644
@@ -467,13 +467,59 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
                             struct btrfs_root *root,
                             int load_cache_only)
 {
+       DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;
 
-       smp_mb();
-       if (cache->cached != BTRFS_CACHE_NO)
+       caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
+       BUG_ON(!caching_ctl);
+
+       INIT_LIST_HEAD(&caching_ctl->list);
+       mutex_init(&caching_ctl->mutex);
+       init_waitqueue_head(&caching_ctl->wait);
+       caching_ctl->block_group = cache;
+       caching_ctl->progress = cache->key.objectid;
+       atomic_set(&caching_ctl->count, 1);
+       caching_ctl->work.func = caching_thread;
+
+       spin_lock(&cache->lock);
+       /*
+        * This should be rare, but it can happen when one thread starts to
+        * load the space cache info while some other thread starts a
+        * transaction commit that tries to do an allocation before the
+        * first thread has finished loading.  The previous loop should have
+        * kept us from choosing this block group, but if we've moved to the
+        * state where we will wait on caching block groups, we need to first
+        * check whether a fast load is in progress here so we can wait for
+        * it to finish; otherwise we could end up allocating from a block
+        * group whose cache gets evicted for one reason or another.
+        */
+       while (cache->cached == BTRFS_CACHE_FAST) {
+               struct btrfs_caching_control *ctl;
+
+               ctl = cache->caching_ctl;
+               atomic_inc(&ctl->count);
+               prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
+               spin_unlock(&cache->lock);
+
+               schedule();
+
+               finish_wait(&ctl->wait, &wait);
+               put_caching_control(ctl);
+               spin_lock(&cache->lock);
+       }
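
The wait loop just added follows the standard kernel sleep pattern: take a reference, register on the wait queue with prepare_to_wait(), drop the spinlock, schedule(), then retake the lock and re-test the condition. Below is a minimal userspace analogue of that drop-lock/sleep/retake/recheck shape, using a pthread condition variable in place of the wait queue (the names are illustrative, not btrfs code):

#include <pthread.h>

enum cache_state { CACHE_NO, CACHE_FAST, CACHE_STARTED, CACHE_FINISHED };

struct cache_model {
        pthread_mutex_t lock;           /* stands in for cache->lock */
        pthread_cond_t wait;            /* stands in for caching_ctl->wait */
        enum cache_state cached;
};

/* Block until a concurrent fast load finishes, re-checking the state
 * after every wakeup exactly as the loop above re-tests cache->cached. */
static void wait_for_fast_load(struct cache_model *c)
{
        pthread_mutex_lock(&c->lock);
        while (c->cached == CACHE_FAST)
                pthread_cond_wait(&c->wait, &c->lock);
        pthread_mutex_unlock(&c->lock);
}
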
+
+       if (cache->cached != BTRFS_CACHE_NO) {
+               spin_unlock(&cache->lock);
+               kfree(caching_ctl);
                return 0;
+       }
+       WARN_ON(cache->caching_ctl);
+       cache->caching_ctl = caching_ctl;
+       cache->cached = BTRFS_CACHE_FAST;
+       spin_unlock(&cache->lock);
 
        /*
         * We can't do the read from on-disk cache during a commit since we need
@@ -484,56 +530,51 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
        if (trans && (!trans->transaction->in_commit) &&
            (root && root != root->fs_info->tree_root) &&
            btrfs_test_opt(root, SPACE_CACHE)) {
-               spin_lock(&cache->lock);
-               if (cache->cached != BTRFS_CACHE_NO) {
-                       spin_unlock(&cache->lock);
-                       return 0;
-               }
-               cache->cached = BTRFS_CACHE_STARTED;
-               spin_unlock(&cache->lock);
-
                ret = load_free_space_cache(fs_info, cache);
 
                spin_lock(&cache->lock);
                if (ret == 1) {
+                       cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
-                       cache->cached = BTRFS_CACHE_NO;
+                       if (load_cache_only) {
+                               cache->caching_ctl = NULL;
+                               cache->cached = BTRFS_CACHE_NO;
+                       } else {
+                               cache->cached = BTRFS_CACHE_STARTED;
+                       }
                }
                spin_unlock(&cache->lock);
+               wake_up(&caching_ctl->wait);
                if (ret == 1) {
+                       put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
+       } else {
+               /*
+                * We are not going to do the fast caching, so set cached to
+                * the appropriate value and wake up any waiters.
+                */
+               spin_lock(&cache->lock);
+               if (load_cache_only) {
+                       cache->caching_ctl = NULL;
+                       cache->cached = BTRFS_CACHE_NO;
+               } else {
+                       cache->cached = BTRFS_CACHE_STARTED;
+               }
+               spin_unlock(&cache->lock);
+               wake_up(&caching_ctl->wait);
        }
 
-       if (load_cache_only)
-               return 0;
-
-       caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
-       BUG_ON(!caching_ctl);
-
-       INIT_LIST_HEAD(&caching_ctl->list);
-       mutex_init(&caching_ctl->mutex);
-       init_waitqueue_head(&caching_ctl->wait);
-       caching_ctl->block_group = cache;
-       caching_ctl->progress = cache->key.objectid;
-       /* one for caching kthread, one for caching block group list */
-       atomic_set(&caching_ctl->count, 2);
-       caching_ctl->work.func = caching_thread;
-
-       spin_lock(&cache->lock);
-       if (cache->cached != BTRFS_CACHE_NO) {
-               spin_unlock(&cache->lock);
-               kfree(caching_ctl);
+       if (load_cache_only) {
+               put_caching_control(caching_ctl);
                return 0;
        }
-       cache->caching_ctl = caching_ctl;
-       cache->cached = BTRFS_CACHE_STARTED;
-       spin_unlock(&cache->lock);
 
        down_write(&fs_info->extent_commit_sem);
+       atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);
 
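
After this hunk, caching_ctl starts life with a refcount of one (the reference this function holds) and gains a second reference just before being linked onto fs_info->caching_block_groups, with put_caching_control() dropping a reference on each exit path. A standalone sketch of that discipline, assuming the control structure is freed on the final put, which is how the references are balanced here:

#include <stdatomic.h>
#include <stdlib.h>

struct ctl_model {
        atomic_int count;
        /* ... payload ... */
};

static struct ctl_model *ctl_alloc(void)
{
        struct ctl_model *c = calloc(1, sizeof(*c));
        if (c)
                atomic_init(&c->count, 1);      /* the caller's reference */
        return c;
}

static void ctl_get(struct ctl_model *c)
{
        atomic_fetch_add(&c->count, 1);         /* e.g. before list_add_tail() */
}

static void ctl_put(struct ctl_model *c)
{
        if (atomic_fetch_sub(&c->count, 1) == 1)
                free(c);                        /* last reference dropped */
}
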
@@ -1788,18 +1829,18 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 {
        int ret;
        u64 discarded_bytes = 0;
-       struct btrfs_multi_bio *multi = NULL;
+       struct btrfs_bio *bbio = NULL;
 
 
        /* Tell the block device(s) that the sectors can be discarded */
        ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
-                             bytenr, &num_bytes, &multi, 0);
+                             bytenr, &num_bytes, &bbio, 0);
        if (!ret) {
-               struct btrfs_bio_stripe *stripe = multi->stripes;
+               struct btrfs_bio_stripe *stripe = bbio->stripes;
                int i;
 
 
-               for (i = 0; i < multi->num_stripes; i++, stripe++) {
+               for (i = 0; i < bbio->num_stripes; i++, stripe++) {
                        if (!stripe->dev->can_discard)
                                continue;
 
@@ -1818,7 +1859,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
                         */
                        ret = 0;
                }
-               kfree(multi);
+               kfree(bbio);
        }
 
        if (actual_bytes)
@@ -3334,12 +3375,12 @@ out:
 /*
  * shrink metadata reservation for delalloc
  */
-static int shrink_delalloc(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *root, u64 to_reclaim,
+static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
                           bool wait_ordered)
 {
        struct btrfs_block_rsv *block_rsv;
        struct btrfs_space_info *space_info;
+       struct btrfs_trans_handle *trans;
        u64 reserved;
        u64 max_reclaim;
        u64 reclaimed = 0;
@@ -3348,6 +3389,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
        int loops = 0;
        unsigned long progress;
 
+       trans = (struct btrfs_trans_handle *)current->journal_info;
        block_rsv = &root->fs_info->delalloc_block_rsv;
        space_info = block_rsv->space_info;
 
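
shrink_delalloc() no longer takes the transaction handle as a parameter; it recovers it from current->journal_info, where btrfs stashes the running handle for the current task, so deep callees need no extra plumbing. A userspace analogue with a thread-local standing in for current->journal_info (names are illustrative):

struct trans_model { int id; };

static _Thread_local struct trans_model *journal_info;

static void start_trans(struct trans_model *t)
{
        journal_info = t;               /* like current->journal_info = trans */
}

/* Any function deep in the call chain can recover the handle without
 * an extra parameter, which is what the assignment above does. */
static struct trans_model *current_trans(void)
{
        return journal_info;
}
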
@@ -3374,7 +3416,8 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
                smp_mb();
                nr_pages = min_t(unsigned long, nr_pages,
                       root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
-               writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
+               writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
+                                               WB_REASON_FS_FREE_SPACE);
 
                spin_lock(&space_info->lock);
                if (reserved > space_info->bytes_may_use)
@@ -3417,6 +3460,60 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
        return reclaimed >= to_reclaim;
 }
 
+/**
+ * may_commit_transaction - possibly commit the transaction if it's OK to
+ * @root - the root we're allocating for
+ * @space_info - the space_info we're allocating for
+ * @bytes - the number of bytes we want to reserve
+ * @force - force the commit
+ *
+ * This will check to make sure that committing the transaction will actually
+ * get us somewhere and then commit the transaction if it does.  Otherwise it
+ * will return -ENOSPC.
+ */
+static int may_commit_transaction(struct btrfs_root *root,
+                                 struct btrfs_space_info *space_info,
+                                 u64 bytes, int force)
+{
+       struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
+       struct btrfs_trans_handle *trans;
+
+       trans = (struct btrfs_trans_handle *)current->journal_info;
+       if (trans)
+               return -EAGAIN;
+
+       if (force)
+               goto commit;
+
+       /* See if there is enough pinned space to make this reservation */
+       spin_lock(&space_info->lock);
+       if (space_info->bytes_pinned >= bytes) {
+               spin_unlock(&space_info->lock);
+               goto commit;
+       }
+       spin_unlock(&space_info->lock);
+
+       /*
+        * See if there is some space in the delayed insertion reservation for
+        * this reservation.
+        */
+       if (space_info != delayed_rsv->space_info)
+               return -ENOSPC;
+
+       spin_lock(&delayed_rsv->lock);
+       if (delayed_rsv->size < bytes) {
+               spin_unlock(&delayed_rsv->lock);
+               return -ENOSPC;
+       }
+       spin_unlock(&delayed_rsv->lock);
+
+commit:
+       trans = btrfs_join_transaction(root);
+       if (IS_ERR(trans))
+               return -ENOSPC;
+
+       return btrfs_commit_transaction(trans, root);
+}
+
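
The helper commits only when doing so can plausibly satisfy the reservation: never while the caller already holds a handle, always when forced, and otherwise only if enough bytes are pinned (pinned space is released at commit) or the delayed-insertion reserve feeding the same space_info is big enough. A condensed standalone model of that decision (the names and flattened parameters are invented for illustration):

#include <stdbool.h>
#include <stdint.h>

/* Returns true when a commit could free enough space for the request. */
static bool commit_would_help(uint64_t bytes_pinned, uint64_t delayed_rsv_size,
                              bool same_space_info, uint64_t bytes)
{
        if (bytes_pinned >= bytes)
                return true;            /* commit releases pinned space */
        return same_space_info && delayed_rsv_size >= bytes;
}
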
 /**
  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
  * @root - the root we're allocating for
@@ -3436,7 +3533,6 @@ static int reserve_metadata_bytes(struct btrfs_root *root,
                                  u64 orig_bytes, int flush)
 {
        struct btrfs_space_info *space_info = block_rsv->space_info;
-       struct btrfs_trans_handle *trans;
        u64 used;
        u64 num_bytes = orig_bytes;
        int retries = 0;
@@ -3445,7 +3541,6 @@ static int reserve_metadata_bytes(struct btrfs_root *root,
        bool flushing = false;
        bool wait_ordered = false;
 
-       trans = (struct btrfs_trans_handle *)current->journal_info;
 again:
        ret = 0;
        spin_lock(&space_info->lock);
@@ -3461,7 +3556,7 @@ again:
                 * deadlock since we are waiting for the flusher to finish, but
                 * hold the current transaction open.
                 */
-               if (trans)
+               if (current->journal_info)
                        return -EAGAIN;
                ret = wait_event_interruptible(space_info->wait,
                                               !space_info->flush);
@@ -3517,12 +3612,16 @@ again:
                 */
                avail = (space_info->total_bytes - space_info->bytes_used) * 8;
                do_div(avail, 10);
-               if (space_info->bytes_pinned >= avail && flush && !trans &&
-                   !committed) {
+               if (space_info->bytes_pinned >= avail && flush && !committed) {
                        space_info->flush = 1;
                        flushing = true;
                        spin_unlock(&space_info->lock);
-                       goto commit;
+                       ret = may_commit_transaction(root, space_info,
+                                                    orig_bytes, 1);
+                       if (ret)
+                               goto out;
+                       committed = true;
+                       goto again;
                }
 
                spin_lock(&root->fs_info->free_chunk_lock);
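
The branch above estimates reclaimable space as 80% of the bytes still unallocated in this space_info and forces a commit once pinned bytes reach that level. A standalone model of the heuristic, with invented example numbers in the trailing comment:

#include <stdbool.h>
#include <stdint.h>

static bool should_force_commit(uint64_t total, uint64_t used, uint64_t pinned)
{
        uint64_t avail = (total - used) * 8 / 10;

        return pinned >= avail;
}

/* e.g. total = 10 GiB, used = 8 GiB gives avail = 1.6 GiB, so 1.6 GiB
 * or more of pinned space triggers may_commit_transaction(..., force=1). */
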
@@ -3575,7 +3674,7 @@ again:
         * We do synchronous shrinking since we don't actually unreserve
         * metadata until after the IO is completed.
         */
-       ret = shrink_delalloc(trans, root, num_bytes, wait_ordered);
+       ret = shrink_delalloc(root, num_bytes, wait_ordered);
        if (ret < 0)
                goto out;
 
@@ -3592,21 +3691,12 @@ again:
                goto again;
        }
 
-       ret = -EAGAIN;
-       if (trans)
-               goto out;
-
-commit:
        ret = -ENOSPC;
        if (committed)
                goto out;
 
-       trans = btrfs_join_transaction(root);
-       if (IS_ERR(trans))
-               goto out;
-       ret = btrfs_commit_transaction(trans, root);
+       ret = may_commit_transaction(root, space_info, orig_bytes, 0);
        if (!ret) {
-               trans = NULL;
                committed = true;
                goto again;
        }
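
With the commit logic hoisted into may_commit_transaction(), the slow path reduces to "try to reserve, commit at most once if that might free space, then retry". A hedged sketch of that shape (the function-pointer signatures are invented, not the real ones):

#include <stdbool.h>

static int reserve_with_one_commit(int (*try_reserve)(void),
                                   int (*try_commit)(void))
{
        bool committed = false;

again:
        if (try_reserve() == 0)
                return 0;
        if (!committed && try_commit() == 0) {
                committed = true;
                goto again;             /* retry once with the freed space */
        }
        return -1;                      /* -ENOSPC in the real code */
}
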
@@ -3748,16 +3838,16 @@ void btrfs_free_block_rsv(struct btrfs_root *root,
        kfree(rsv);
 }
 
-int btrfs_block_rsv_add(struct btrfs_root *root,
-                       struct btrfs_block_rsv *block_rsv,
-                       u64 num_bytes)
+static inline int __block_rsv_add(struct btrfs_root *root,
+                                 struct btrfs_block_rsv *block_rsv,
+                                 u64 num_bytes, int flush)
 {
        int ret;
 
        if (num_bytes == 0)
                return 0;
 
-       ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+       ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
        if (!ret) {
                block_rsv_add_bytes(block_rsv, num_bytes, 1);
                return 0;
@@ -3766,6 +3856,20 @@ int btrfs_block_rsv_add(struct btrfs_root *root,
        return ret;
 }
 
+int btrfs_block_rsv_add(struct btrfs_root *root,
+                       struct btrfs_block_rsv *block_rsv,
+                       u64 num_bytes)
+{
+       return __block_rsv_add(root, block_rsv, num_bytes, 1);
+}
+
+int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
+                               struct btrfs_block_rsv *block_rsv,
+                               u64 num_bytes)
+{
+       return __block_rsv_add(root, block_rsv, num_bytes, 0);
+}
+
 int btrfs_block_rsv_check(struct btrfs_root *root,
                          struct btrfs_block_rsv *block_rsv, int min_factor)
 {
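
Both exported entry points now funnel into __block_rsv_add(); the _noflush variant exists for callers that already hold a transaction handle, where flushing could end up waiting on a transaction commit and deadlock. A hypothetical caller sketch in kernel context (reserve_for_inode and its in_transaction flag are invented for illustration):

static int reserve_for_inode(struct btrfs_root *root,
                             struct btrfs_block_rsv *rsv,
                             u64 bytes, int in_transaction)
{
        if (in_transaction)     /* must not flush while holding a handle */
                return btrfs_block_rsv_add_noflush(root, rsv, bytes);
        return btrfs_block_rsv_add(root, rsv, bytes);
}
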
@@ -3997,23 +4101,30 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
  */
 static unsigned drop_outstanding_extent(struct inode *inode)
 {
+       unsigned drop_inode_space = 0;
        unsigned dropped_extents = 0;
 
        BUG_ON(!BTRFS_I(inode)->outstanding_extents);
        BTRFS_I(inode)->outstanding_extents--;
 
+       if (BTRFS_I(inode)->outstanding_extents == 0 &&
+           BTRFS_I(inode)->delalloc_meta_reserved) {
+               drop_inode_space = 1;
+               BTRFS_I(inode)->delalloc_meta_reserved = 0;
+       }
+
        /*
         * If we have at least as many outstanding extents as we have
         * reserved, then we need to leave the reserved extents count alone.
         */
        if (BTRFS_I(inode)->outstanding_extents >=
            BTRFS_I(inode)->reserved_extents)
-               return 0;
+               return drop_inode_space;
 
        dropped_extents = BTRFS_I(inode)->reserved_extents -
                BTRFS_I(inode)->outstanding_extents;
        BTRFS_I(inode)->reserved_extents -= dropped_extents;
-       return dropped_extents;
+       return dropped_extents + drop_inode_space;
 }
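
A standalone model of the accounting above, with the BTRFS_I(inode) fields reduced to plain unsigneds (the example values in the trailing comment are invented):

static unsigned drop_outstanding_model(unsigned *outstanding,
                                       unsigned *reserved,
                                       unsigned *meta_reserved)
{
        unsigned drop_inode_space = 0, dropped = 0;

        (*outstanding)--;
        if (*outstanding == 0 && *meta_reserved) {
                drop_inode_space = 1;   /* give back the inode-update item */
                *meta_reserved = 0;
        }
        if (*outstanding >= *reserved)
                return drop_inode_space;

        dropped = *reserved - *outstanding;
        *reserved -= dropped;
        return dropped + drop_inode_space;
}

/* e.g. outstanding = 1, reserved = 3, meta_reserved = 1: after the
 * decrement outstanding is 0, so the call returns 3 + 1 = 4. */
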
 
 /**
@@ -4099,9 +4210,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
                nr_extents = BTRFS_I(inode)->outstanding_extents -
                        BTRFS_I(inode)->reserved_extents;
                BTRFS_I(inode)->reserved_extents += nr_extents;
+       }
 
-               to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
+       /*
+        * Add an item to reserve for updating the inode when we complete the
+        * delalloc io.
+        */
+       if (!BTRFS_I(inode)->delalloc_meta_reserved) {
+               nr_extents++;
+               BTRFS_I(inode)->delalloc_meta_reserved = 1;
        }
+
+       to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
        to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
        spin_unlock(&BTRFS_I(inode)->lock);
 
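
The reservation now always includes one extra metadata item for updating the inode when the delalloc IO completes, added the first time a reservation is taken for the inode. A standalone model of the nr_extents computation (the csum contribution from calc_csum_metadata_size() and the reserved_extents update are elided):

static unsigned extents_to_reserve(unsigned outstanding, unsigned reserved,
                                   int *meta_reserved)
{
        unsigned nr = 0;

        if (outstanding > reserved)
                nr = outstanding - reserved;
        if (!*meta_reserved) {
                nr++;                   /* one item for the inode update */
                *meta_reserved = 1;
        }
        return nr;
}
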
@@ -5099,13 +5219,15 @@ search:
                }
 
 have_block_group:
-               if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+               cached = block_group_cache_done(block_group);
+               if (unlikely(!cached)) {
                        u64 free_percent;
 
+                       found_uncached_bg = true;
                        ret = cache_block_group(block_group, trans,
                                                orig_root, 1);
                        if (block_group->cached == BTRFS_CACHE_FINISHED)
-                               goto have_block_group;
+                               goto alloc;
 
                        free_percent = btrfs_block_group_used(&block_group->item);
                        free_percent *= 100;
@@ -5127,7 +5249,6 @@ have_block_group:
                                                        orig_root, 0);
                                BUG_ON(ret);
                        }
-                       found_uncached_bg = true;
 
                        /*
                         * If loop is set for cached only, try the next block
@@ -5137,10 +5258,7 @@ have_block_group:
                                goto loop;
                }
 
-               cached = block_group_cache_done(block_group);
-               if (unlikely(!cached))
-                       found_uncached_bg = true;
-
+alloc:
                if (unlikely(block_group->ro))
                        goto loop;