/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 root_objectid, u64 owner_objectid,
                               u64 owner_offset, int refs_to_drop,
                               struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

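/*
 * Return non-zero once caching of this block group has stopped, either
 * because it finished successfully or because it failed with an error.
 */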
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

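/* Return non-zero if the block group's flags contain all of @bits. */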
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

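/*
 * Block group reference counting: btrfs_get_block_group() takes a
 * reference, btrfs_put_block_group() drops one and frees the block group
 * once the last reference is gone.
 */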
static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

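/*
 * Mark the range [start, start + num_bytes) as excluded from free space
 * caching by setting EXTENT_UPTODATE in both freed_extents trees.
 */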
static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

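/*
 * Clear the excluded-extent markers for the whole block group, undoing
 * what exclude_super_stripes()/add_excluded_extent() set up.
 */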
static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

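/*
 * Exclude the parts of this block group that hold superblock copies (and
 * anything below BTRFS_SUPER_INFO_OFFSET) so they are never handed out as
 * free space.  The excluded bytes are accounted in cache->bytes_super.
 */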
static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}

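/*
 * Return the caching control for a block group whose caching is in
 * progress, with an extra reference held, or NULL if no caching thread is
 * running.  Callers must drop the reference with put_caching_control().
 */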
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check pinned_extents for anything that can't be
 * used yet, because its free space will not be released until the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}

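/*
 * Worker that walks the extent tree (via the commit root) and records the
 * gaps between allocated extents as free space for one block group.  It
 * periodically drops its locks to avoid stalling commits and wakes up
 * waiters each time roughly 2MB of free space has been found.
 */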
static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = -ENOMEM;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->extent_commit_sem)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->leafsize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->caching_ctl = NULL;
                block_group->cached = BTRFS_CACHE_ERROR;
                spin_unlock(&block_group->lock);
        }
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

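/*
 * Kick off caching of a block group's free space.  If the space cache is
 * enabled we first try to load it the fast way from disk; otherwise (or
 * if that fails and @load_cache_only is not set) a caching thread is
 * queued to build the free space from the extent tree.
 */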
static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        caching_ctl->work.func = caching_thread;

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it can happen when one thread
         * starts to load the space cache info and then some other thread
         * starts a transaction commit which tries to do an allocation while
         * the first thread is still loading the space cache info.  The
         * previous loop should have kept us from choosing this block group,
         * but if we've moved to the state where we will wait on caching
         * block groups we need to first check if we're doing a fast load
         * here, so we can wait for it to finish; otherwise we could end up
         * allocating from a block group whose cache gets evicted for one
         * reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->extent_commit_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

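/*
 * Find the space_info whose flags match the block group type bits in
 * @flags (data, metadata or system), or NULL if none exists yet.
 */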
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        if (ret > 0) {
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                if (key.objectid == start &&
                    key.type == BTRFS_METADATA_ITEM_KEY)
                        ret = 0;
        }
        btrfs_free_path(path);
        return ret;
}

/*
 * Helper function to look up the reference count and flags of a tree block.
 *
 * The delayed ref head node is used to store the sum of all the reference
 * count modifications queued up in the rbtree.  It may also store the
 * extent flags to set.  This way you can check what the reference count
 * and extent flags will be once all of the queued delayed refs have been
 * processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->leafsize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->leafsize)
                                ret = 0;
                }
                if (ret) {
                        key.objectid = bytenr;
                        key.type = BTRFS_EXTENT_ITEM_KEY;
                        key.offset = root->leafsize;
                        btrfs_release_path(path);
                        goto again;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again.
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees; the location of the tree block is
 * recorded in the back ref.  The full back ref is actually generic and
 * could be used in all the cases where the implicit back ref is used.
 * Its major shortcoming is overhead: every time a tree block gets COWed,
 * we have to update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for the
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it.  So we can detect
 * the event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for the
 * pointers in the block.  Remove these full back refs and add implicit
 * back refs for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * the pointers in the block.  Add full back refs for every pointer in
 * the block and increase the lower level extents' reference counts.  The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer
 * in the new block and increase the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent.
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored
 * in the tree block info structure.
 */

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

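/*
 * Hash (root, objectid, offset) into the 64-bit key offset used for
 * implicit data back refs, built from two crc32c sums folded together.
 */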
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

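/*
 * Find an existing data back ref for the given extent.  With a non-zero
 * @parent this is a shared ref keyed directly by the parent block;
 * otherwise the implicit refs sharing a hash value are scanned for an
 * exact (root, owner, offset) match.  Returns 0 if found, -ENOENT or
 * another error otherwise.
 */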
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

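/*
 * Insert a data back ref, or bump the count of an existing one.  Implicit
 * refs can collide on their hashed key offset, so on -EEXIST we probe
 * successive offsets until we find a matching ref or an empty slot.
 */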
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}

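/*
 * Drop @refs_to_drop references from the data ref item the path points
 * at, deleting the item entirely once its count reaches zero.
 */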
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

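/*
 * Return the reference count stored in a data ref, whether it lives in an
 * inline ref (@iref) or in a standalone ref item at path->slots[0].
 */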
1328 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1329                                           struct btrfs_path *path,
1330                                           struct btrfs_extent_inline_ref *iref)
1331 {
1332         struct btrfs_key key;
1333         struct extent_buffer *leaf;
1334         struct btrfs_extent_data_ref *ref1;
1335         struct btrfs_shared_data_ref *ref2;
1336         u32 num_refs = 0;
1337
1338         leaf = path->nodes[0];
1339         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1340         if (iref) {
1341                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1342                     BTRFS_EXTENT_DATA_REF_KEY) {
1343                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1344                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1345                 } else {
1346                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1347                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1348                 }
1349         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1350                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1351                                       struct btrfs_extent_data_ref);
1352                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1353         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1354                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1355                                       struct btrfs_shared_data_ref);
1356                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1357 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1358         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1359                 struct btrfs_extent_ref_v0 *ref0;
1360                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1361                                       struct btrfs_extent_ref_v0);
1362                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1363 #endif
1364         } else {
1365                 WARN_ON(1);
1366         }
1367         return num_refs;
1368 }
1369
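     /*
      * Look up the backref item for a tree block: SHARED_BLOCK_REF keyed
      * on the parent block when @parent is set, TREE_BLOCK_REF keyed on
      * @root_objectid otherwise.  Returns 0 if found and -ENOENT if not,
      * falling back to the old V0 ref key when the compat code is built in.
      */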
1370 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1371                                           struct btrfs_root *root,
1372                                           struct btrfs_path *path,
1373                                           u64 bytenr, u64 parent,
1374                                           u64 root_objectid)
1375 {
1376         struct btrfs_key key;
1377         int ret;
1378
1379         key.objectid = bytenr;
1380         if (parent) {
1381                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1382                 key.offset = parent;
1383         } else {
1384                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1385                 key.offset = root_objectid;
1386         }
1387
1388         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1389         if (ret > 0)
1390                 ret = -ENOENT;
1391 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1392         if (ret == -ENOENT && parent) {
1393                 btrfs_release_path(path);
1394                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1395                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1396                 if (ret > 0)
1397                         ret = -ENOENT;
1398         }
1399 #endif
1400         return ret;
1401 }
1402
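     /*
      * Insert the backref item for a tree block.  Tree block refs carry
      * no payload beyond the key itself, so an empty item of size 0 is
      * enough.
      */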
1403 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1404                                           struct btrfs_root *root,
1405                                           struct btrfs_path *path,
1406                                           u64 bytenr, u64 parent,
1407                                           u64 root_objectid)
1408 {
1409         struct btrfs_key key;
1410         int ret;
1411
1412         key.objectid = bytenr;
1413         if (parent) {
1414                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1415                 key.offset = parent;
1416         } else {
1417                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1418                 key.offset = root_objectid;
1419         }
1420
1421         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1422         btrfs_release_path(path);
1423         return ret;
1424 }
1425
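     /*
      * Map (parent, owner) to a backref key type.  Owners below
      * BTRFS_FIRST_FREE_OBJECTID are tree blocks, everything else is a
      * data extent; a non-zero parent selects the shared variant of
      * either ref type.
      */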
1426 static inline int extent_ref_type(u64 parent, u64 owner)
1427 {
1428         int type;
1429         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1430                 if (parent > 0)
1431                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1432                 else
1433                         type = BTRFS_TREE_BLOCK_REF_KEY;
1434         } else {
1435                 if (parent > 0)
1436                         type = BTRFS_SHARED_DATA_REF_KEY;
1437                 else
1438                         type = BTRFS_EXTENT_DATA_REF_KEY;
1439         }
1440         return type;
1441 }
1442
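     /*
      * Find the key that follows path->slots[level], walking up the path
      * when the current node has no next slot.  Returns 0 and fills in
      * @key on success, 1 if there is no next key.
      */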
1443 static int find_next_key(struct btrfs_path *path, int level,
1444                          struct btrfs_key *key)
1446 {
1447         for (; level < BTRFS_MAX_LEVEL; level++) {
1448                 if (!path->nodes[level])
1449                         break;
1450                 if (path->slots[level] + 1 >=
1451                     btrfs_header_nritems(path->nodes[level]))
1452                         continue;
1453                 if (level == 0)
1454                         btrfs_item_key_to_cpu(path->nodes[level], key,
1455                                               path->slots[level] + 1);
1456                 else
1457                         btrfs_node_key_to_cpu(path->nodes[level], key,
1458                                               path->slots[level] + 1);
1459                 return 0;
1460         }
1461         return 1;
1462 }
1463
1464 /*
1465  * Look for an inline back ref.  If the back ref is found, *ref_ret is
1466  * set to the address of the inline back ref, and 0 is returned.
1467  *
1468  * If the back ref isn't found, *ref_ret is set to the address where it
1469  * should be inserted, and -ENOENT is returned.
1470  *
1471  * If insert is true and there are too many inline back refs, the path
1472  * points to the extent item, and -EAGAIN is returned.
1473  *
1474  * NOTE: inline back refs are ordered in the same way that back ref
1475  *       items in the tree are ordered.
1476  */
1477 static noinline_for_stack
1478 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1479                                  struct btrfs_root *root,
1480                                  struct btrfs_path *path,
1481                                  struct btrfs_extent_inline_ref **ref_ret,
1482                                  u64 bytenr, u64 num_bytes,
1483                                  u64 parent, u64 root_objectid,
1484                                  u64 owner, u64 offset, int insert)
1485 {
1486         struct btrfs_key key;
1487         struct extent_buffer *leaf;
1488         struct btrfs_extent_item *ei;
1489         struct btrfs_extent_inline_ref *iref;
1490         u64 flags;
1491         u64 item_size;
1492         unsigned long ptr;
1493         unsigned long end;
1494         int extra_size;
1495         int type;
1496         int want;
1497         int ret;
1498         int err = 0;
1499         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1500                                                  SKINNY_METADATA);
1501
1502         key.objectid = bytenr;
1503         key.type = BTRFS_EXTENT_ITEM_KEY;
1504         key.offset = num_bytes;
1505
1506         want = extent_ref_type(parent, owner);
1507         if (insert) {
1508                 extra_size = btrfs_extent_inline_ref_size(want);
1509                 path->keep_locks = 1;
1510         } else
1511                 extra_size = -1;
1512
1513         /*
1514          * For tree blocks the owner field holds the block's level, so for
1515          * skinny metadata we can use it directly as the key offset.
1516          */
1517         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1518                 key.type = BTRFS_METADATA_ITEM_KEY;
1519                 key.offset = owner;
1520         }
1521
1522 again:
1523         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1524         if (ret < 0) {
1525                 err = ret;
1526                 goto out;
1527         }
1528
1529         /*
1530          * We may be a newly converted file system which still has the old fat
1531          * extent entries for metadata, so try and see if we have one of those.
1532          */
1533         if (ret > 0 && skinny_metadata) {
1534                 skinny_metadata = false;
1535                 if (path->slots[0]) {
1536                         path->slots[0]--;
1537                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1538                                               path->slots[0]);
1539                         if (key.objectid == bytenr &&
1540                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1541                             key.offset == num_bytes)
1542                                 ret = 0;
1543                 }
1544                 if (ret) {
1545                         key.type = BTRFS_EXTENT_ITEM_KEY;
1546                         key.offset = num_bytes;
1547                         btrfs_release_path(path);
1548                         goto again;
1549                 }
1550         }
1551
1552         if (ret && !insert) {
1553                 err = -ENOENT;
1554                 goto out;
1555         } else if (WARN_ON(ret)) {
1556                 err = -EIO;
1557                 goto out;
1558         }
1559
1560         leaf = path->nodes[0];
1561         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1562 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1563         if (item_size < sizeof(*ei)) {
1564                 if (!insert) {
1565                         err = -ENOENT;
1566                         goto out;
1567                 }
1568                 ret = convert_extent_item_v0(trans, root, path, owner,
1569                                              extra_size);
1570                 if (ret < 0) {
1571                         err = ret;
1572                         goto out;
1573                 }
1574                 leaf = path->nodes[0];
1575                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1576         }
1577 #endif
1578         BUG_ON(item_size < sizeof(*ei));
1579
1580         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1581         flags = btrfs_extent_flags(leaf, ei);
1582
1583         ptr = (unsigned long)(ei + 1);
1584         end = (unsigned long)ei + item_size;
1585
1586         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1587                 ptr += sizeof(struct btrfs_tree_block_info);
1588                 BUG_ON(ptr > end);
1589         }
1590
1591         err = -ENOENT;
1592         while (1) {
1593                 if (ptr >= end) {
1594                         WARN_ON(ptr > end);
1595                         break;
1596                 }
1597                 iref = (struct btrfs_extent_inline_ref *)ptr;
1598                 type = btrfs_extent_inline_ref_type(leaf, iref);
1599                 if (want < type)
1600                         break;
1601                 if (want > type) {
1602                         ptr += btrfs_extent_inline_ref_size(type);
1603                         continue;
1604                 }
1605
1606                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1607                         struct btrfs_extent_data_ref *dref;
1608                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1609                         if (match_extent_data_ref(leaf, dref, root_objectid,
1610                                                   owner, offset)) {
1611                                 err = 0;
1612                                 break;
1613                         }
1614                         if (hash_extent_data_ref_item(leaf, dref) <
1615                             hash_extent_data_ref(root_objectid, owner, offset))
1616                                 break;
1617                 } else {
1618                         u64 ref_offset;
1619                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1620                         if (parent > 0) {
1621                                 if (parent == ref_offset) {
1622                                         err = 0;
1623                                         break;
1624                                 }
1625                                 if (ref_offset < parent)
1626                                         break;
1627                         } else {
1628                                 if (root_objectid == ref_offset) {
1629                                         err = 0;
1630                                         break;
1631                                 }
1632                                 if (ref_offset < root_objectid)
1633                                         break;
1634                         }
1635                 }
1636                 ptr += btrfs_extent_inline_ref_size(type);
1637         }
1638         if (err == -ENOENT && insert) {
1639                 if (item_size + extra_size >=
1640                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1641                         err = -EAGAIN;
1642                         goto out;
1643                 }
1644                 /*
1645                  * To add a new inline back ref, we have to make sure
1646                  * there is no corresponding back ref item.
1647                  * For simplicity, we just do not add a new inline back
1648                  * ref if there is any kind of item for this block.
1649                  */
1650                 if (find_next_key(path, 0, &key) == 0 &&
1651                     key.objectid == bytenr &&
1652                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1653                         err = -EAGAIN;
1654                         goto out;
1655                 }
1656         }
1657         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1658 out:
1659         if (insert) {
1660                 path->keep_locks = 0;
1661                 btrfs_unlock_up_safe(path, 1);
1662         }
1663         return err;
1664 }
1665
1666 /*
1667  * helper to add a new inline back ref
1668  */
1669 static noinline_for_stack
1670 void setup_inline_extent_backref(struct btrfs_root *root,
1671                                  struct btrfs_path *path,
1672                                  struct btrfs_extent_inline_ref *iref,
1673                                  u64 parent, u64 root_objectid,
1674                                  u64 owner, u64 offset, int refs_to_add,
1675                                  struct btrfs_delayed_extent_op *extent_op)
1676 {
1677         struct extent_buffer *leaf;
1678         struct btrfs_extent_item *ei;
1679         unsigned long ptr;
1680         unsigned long end;
1681         unsigned long item_offset;
1682         u64 refs;
1683         int size;
1684         int type;
1685
1686         leaf = path->nodes[0];
1687         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1688         item_offset = (unsigned long)iref - (unsigned long)ei;
1689
1690         type = extent_ref_type(parent, owner);
1691         size = btrfs_extent_inline_ref_size(type);
1692
1693         btrfs_extend_item(root, path, size);
1694
1695         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1696         refs = btrfs_extent_refs(leaf, ei);
1697         refs += refs_to_add;
1698         btrfs_set_extent_refs(leaf, ei, refs);
1699         if (extent_op)
1700                 __run_delayed_extent_op(extent_op, leaf, ei);
1701
1702         ptr = (unsigned long)ei + item_offset;
1703         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1704         if (ptr < end - size)
1705                 memmove_extent_buffer(leaf, ptr + size, ptr,
1706                                       end - size - ptr);
1707
1708         iref = (struct btrfs_extent_inline_ref *)ptr;
1709         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1710         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1711                 struct btrfs_extent_data_ref *dref;
1712                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1713                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1714                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1715                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1716                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1717         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1718                 struct btrfs_shared_data_ref *sref;
1719                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1720                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1721                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1722         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1723                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1724         } else {
1725                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1726         }
1727         btrfs_mark_buffer_dirty(leaf);
1728 }
1729
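     /*
      * Look up a backref, trying the inline form first.  On success
      * *ref_ret points at the inline ref; if the ref only exists as a
      * separate item, *ref_ret is left NULL and the path points at that
      * item instead.
      */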
1730 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1731                                  struct btrfs_root *root,
1732                                  struct btrfs_path *path,
1733                                  struct btrfs_extent_inline_ref **ref_ret,
1734                                  u64 bytenr, u64 num_bytes, u64 parent,
1735                                  u64 root_objectid, u64 owner, u64 offset)
1736 {
1737         int ret;
1738
1739         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1740                                            bytenr, num_bytes, parent,
1741                                            root_objectid, owner, offset, 0);
1742         if (ret != -ENOENT)
1743                 return ret;
1744
1745         btrfs_release_path(path);
1746         *ref_ret = NULL;
1747
1748         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1749                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1750                                             root_objectid);
1751         } else {
1752                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1753                                              root_objectid, owner, offset);
1754         }
1755         return ret;
1756 }
1757
1758 /*
1759  * helper to update/remove inline back ref
1760  */
1761 static noinline_for_stack
1762 void update_inline_extent_backref(struct btrfs_root *root,
1763                                   struct btrfs_path *path,
1764                                   struct btrfs_extent_inline_ref *iref,
1765                                   int refs_to_mod,
1766                                   struct btrfs_delayed_extent_op *extent_op)
1767 {
1768         struct extent_buffer *leaf;
1769         struct btrfs_extent_item *ei;
1770         struct btrfs_extent_data_ref *dref = NULL;
1771         struct btrfs_shared_data_ref *sref = NULL;
1772         unsigned long ptr;
1773         unsigned long end;
1774         u32 item_size;
1775         int size;
1776         int type;
1777         u64 refs;
1778
1779         leaf = path->nodes[0];
1780         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1781         refs = btrfs_extent_refs(leaf, ei);
1782         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1783         refs += refs_to_mod;
1784         btrfs_set_extent_refs(leaf, ei, refs);
1785         if (extent_op)
1786                 __run_delayed_extent_op(extent_op, leaf, ei);
1787
1788         type = btrfs_extent_inline_ref_type(leaf, iref);
1789
1790         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1791                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1792                 refs = btrfs_extent_data_ref_count(leaf, dref);
1793         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1794                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1795                 refs = btrfs_shared_data_ref_count(leaf, sref);
1796         } else {
1797                 refs = 1;
1798                 BUG_ON(refs_to_mod != -1);
1799         }
1800
1801         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1802         refs += refs_to_mod;
1803
1804         if (refs > 0) {
1805                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1806                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1807                 else
1808                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1809         } else {
1810                 size = btrfs_extent_inline_ref_size(type);
1811                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1812                 ptr = (unsigned long)iref;
1813                 end = (unsigned long)ei + item_size;
1814                 if (ptr + size < end)
1815                         memmove_extent_buffer(leaf, ptr, ptr + size,
1816                                               end - ptr - size);
1817                 item_size -= size;
1818                 btrfs_truncate_item(root, path, item_size, 1);
1819         }
1820         btrfs_mark_buffer_dirty(leaf);
1821 }
1822
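     /*
      * Add @refs_to_add references inline: bump the count on a matching
      * inline ref if one exists, or create a new one at the insert
      * position the lookup found.  -EAGAIN from the lookup (no room for
      * another inline ref) is passed up so the caller can fall back to a
      * separate backref item.
      */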
1823 static noinline_for_stack
1824 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1825                                  struct btrfs_root *root,
1826                                  struct btrfs_path *path,
1827                                  u64 bytenr, u64 num_bytes, u64 parent,
1828                                  u64 root_objectid, u64 owner,
1829                                  u64 offset, int refs_to_add,
1830                                  struct btrfs_delayed_extent_op *extent_op)
1831 {
1832         struct btrfs_extent_inline_ref *iref;
1833         int ret;
1834
1835         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1836                                            bytenr, num_bytes, parent,
1837                                            root_objectid, owner, offset, 1);
1838         if (ret == 0) {
1839                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1840                 update_inline_extent_backref(root, path, iref,
1841                                              refs_to_add, extent_op);
1842         } else if (ret == -ENOENT) {
1843                 setup_inline_extent_backref(root, path, iref, parent,
1844                                             root_objectid, owner, offset,
1845                                             refs_to_add, extent_op);
1846                 ret = 0;
1847         }
1848         return ret;
1849 }
1850
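     /*
      * Insert a backref as a separate item (the non-inline form): a tree
      * block ref for metadata owners, a data ref for everything else.
      */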
1851 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1852                                  struct btrfs_root *root,
1853                                  struct btrfs_path *path,
1854                                  u64 bytenr, u64 parent, u64 root_objectid,
1855                                  u64 owner, u64 offset, int refs_to_add)
1856 {
1857         int ret;
1858         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1859                 BUG_ON(refs_to_add != 1);
1860                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1861                                             parent, root_objectid);
1862         } else {
1863                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1864                                              parent, root_objectid,
1865                                              owner, offset, refs_to_add);
1866         }
1867         return ret;
1868 }
1869
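     /*
      * Drop @refs_to_drop references: decrement the inline ref when
      * @iref is set, otherwise shrink or delete the separate backref
      * item.  Tree blocks carry at most one ref per backref, hence the
      * refs_to_drop == 1 assertion for metadata.
      */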
1870 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1871                                  struct btrfs_root *root,
1872                                  struct btrfs_path *path,
1873                                  struct btrfs_extent_inline_ref *iref,
1874                                  int refs_to_drop, int is_data)
1875 {
1876         int ret = 0;
1877
1878         BUG_ON(!is_data && refs_to_drop != 1);
1879         if (iref) {
1880                 update_inline_extent_backref(root, path, iref,
1881                                              -refs_to_drop, NULL);
1882         } else if (is_data) {
1883                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1884         } else {
1885                 ret = btrfs_del_item(trans, root, path);
1886         }
1887         return ret;
1888 }
1889
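     /*
      * blkdev_issue_discard() takes 512-byte sectors, hence the >> 9
      * conversion of both the start offset and the length.
      */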
1890 static int btrfs_issue_discard(struct block_device *bdev,
1891                                 u64 start, u64 len)
1892 {
1893         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1894 }
1895
1896 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1897                                 u64 num_bytes, u64 *actual_bytes)
1898 {
1899         int ret;
1900         u64 discarded_bytes = 0;
1901         struct btrfs_bio *bbio = NULL;
1902
1904         /* Tell the block device(s) that the sectors can be discarded */
1905         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1906                               bytenr, &num_bytes, &bbio, 0);
1907         /* Error condition is -ENOMEM */
1908         if (!ret) {
1909                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1910                 int i;
1911
1912
1914                         if (!stripe->dev->can_discard)
1915                                 continue;
1916
1917                         ret = btrfs_issue_discard(stripe->dev->bdev,
1918                                                   stripe->physical,
1919                                                   stripe->length);
1920                         if (!ret)
1921                                 discarded_bytes += stripe->length;
1922                         else if (ret != -EOPNOTSUPP)
1923                                 break; /* Logic errors or -ENOMEM, or -EIO, but I don't know how that could happen. JDM */
1924
1925                         /*
1926                          * In case we get back EOPNOTSUPP for some reason,
1927                          * just ignore the return value so we don't screw up
1928                          * people calling discard_extent.
1929                          */
1930                         ret = 0;
1931                 }
1932                 kfree(bbio);
1933         }
1934
1935         if (actual_bytes)
1936                 *actual_bytes = discarded_bytes;
1937
1939         if (ret == -EOPNOTSUPP)
1940                 ret = 0;
1941         return ret;
1942 }
1943
1944 /* Can return -ENOMEM */
1945 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1946                          struct btrfs_root *root,
1947                          u64 bytenr, u64 num_bytes, u64 parent,
1948                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1949 {
1950         int ret;
1951         struct btrfs_fs_info *fs_info = root->fs_info;
1952
1953         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1954                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1955
1956         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1957                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1958                                         num_bytes,
1959                                         parent, root_objectid, (int)owner,
1960                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1961         } else {
1962                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1963                                         num_bytes,
1964                                         parent, root_objectid, owner, offset,
1965                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1966         }
1967         return ret;
1968 }
1969
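     /*
      * Apply a ref count increase in the extent tree: try to add an
      * inline backref first, and on -EAGAIN (no room left inline) bump
      * the extent item's count by hand and insert a separate backref
      * item instead.
      */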
1970 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1971                                   struct btrfs_root *root,
1972                                   u64 bytenr, u64 num_bytes,
1973                                   u64 parent, u64 root_objectid,
1974                                   u64 owner, u64 offset, int refs_to_add,
1975                                   struct btrfs_delayed_extent_op *extent_op)
1976 {
1977         struct btrfs_path *path;
1978         struct extent_buffer *leaf;
1979         struct btrfs_extent_item *item;
1980         u64 refs;
1981         int ret;
1982
1983         path = btrfs_alloc_path();
1984         if (!path)
1985                 return -ENOMEM;
1986
1987         path->reada = 1;
1988         path->leave_spinning = 1;
1989         /* this will set up the path even if it fails to insert the back ref */
1990         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1991                                            path, bytenr, num_bytes, parent,
1992                                            root_objectid, owner, offset,
1993                                            refs_to_add, extent_op);
1994         if (ret != -EAGAIN)
1995                 goto out;
1996
1997         leaf = path->nodes[0];
1998         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1999         refs = btrfs_extent_refs(leaf, item);
2000         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2001         if (extent_op)
2002                 __run_delayed_extent_op(extent_op, leaf, item);
2003
2004         btrfs_mark_buffer_dirty(leaf);
2005         btrfs_release_path(path);
2006
2007         path->reada = 1;
2008         path->leave_spinning = 1;
2009
2010         /* now insert the actual backref */
2011         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2012                                     path, bytenr, parent, root_objectid,
2013                                     owner, offset, refs_to_add);
2014         if (ret)
2015                 btrfs_abort_transaction(trans, root, ret);
2016 out:
2017         btrfs_free_path(path);
2018         return ret;
2019 }
2020
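     /*
      * Process a single delayed data ref.  For the first ref of a
      * freshly allocated extent (insert_reserved) the extent item itself
      * is created here; otherwise the existing ref count is incremented
      * or dropped.
      */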
2021 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2022                                 struct btrfs_root *root,
2023                                 struct btrfs_delayed_ref_node *node,
2024                                 struct btrfs_delayed_extent_op *extent_op,
2025                                 int insert_reserved)
2026 {
2027         int ret = 0;
2028         struct btrfs_delayed_data_ref *ref;
2029         struct btrfs_key ins;
2030         u64 parent = 0;
2031         u64 ref_root = 0;
2032         u64 flags = 0;
2033
2034         ins.objectid = node->bytenr;
2035         ins.offset = node->num_bytes;
2036         ins.type = BTRFS_EXTENT_ITEM_KEY;
2037
2038         ref = btrfs_delayed_node_to_data_ref(node);
2039         trace_run_delayed_data_ref(node, ref, node->action);
2040
2041         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2042                 parent = ref->parent;
2043         else
2044                 ref_root = ref->root;
2045
2046         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2047                 if (extent_op)
2048                         flags |= extent_op->flags_to_set;
2049                 ret = alloc_reserved_file_extent(trans, root,
2050                                                  parent, ref_root, flags,
2051                                                  ref->objectid, ref->offset,
2052                                                  &ins, node->ref_mod);
2053         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2054                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2055                                              node->num_bytes, parent,
2056                                              ref_root, ref->objectid,
2057                                              ref->offset, node->ref_mod,
2058                                              extent_op);
2059         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2060                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2061                                           node->num_bytes, parent,
2062                                           ref_root, ref->objectid,
2063                                           ref->offset, node->ref_mod,
2064                                           extent_op);
2065         } else {
2066                 BUG();
2067         }
2068         return ret;
2069 }
2070
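     /*
      * Fold a delayed extent op into an extent item: update the flags
      * and/or the key stored in the btrfs_tree_block_info that follows a
      * non-skinny tree block item.
      */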
2071 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2072                                     struct extent_buffer *leaf,
2073                                     struct btrfs_extent_item *ei)
2074 {
2075         u64 flags = btrfs_extent_flags(leaf, ei);
2076         if (extent_op->update_flags) {
2077                 flags |= extent_op->flags_to_set;
2078                 btrfs_set_extent_flags(leaf, ei, flags);
2079         }
2080
2081         if (extent_op->update_key) {
2082                 struct btrfs_tree_block_info *bi;
2083                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2084                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2085                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2086         }
2087 }
2088
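     /*
      * Locate the extent item a delayed extent op refers to, trying the
      * skinny METADATA_ITEM key first and falling back to the old
      * EXTENT_ITEM form, then apply the op to it.
      */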
2089 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2090                                  struct btrfs_root *root,
2091                                  struct btrfs_delayed_ref_node *node,
2092                                  struct btrfs_delayed_extent_op *extent_op)
2093 {
2094         struct btrfs_key key;
2095         struct btrfs_path *path;
2096         struct btrfs_extent_item *ei;
2097         struct extent_buffer *leaf;
2098         u32 item_size;
2099         int ret;
2100         int err = 0;
2101         int metadata = !extent_op->is_data;
2102
2103         if (trans->aborted)
2104                 return 0;
2105
2106         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2107                 metadata = 0;
2108
2109         path = btrfs_alloc_path();
2110         if (!path)
2111                 return -ENOMEM;
2112
2113         key.objectid = node->bytenr;
2114
2115         if (metadata) {
2116                 key.type = BTRFS_METADATA_ITEM_KEY;
2117                 key.offset = extent_op->level;
2118         } else {
2119                 key.type = BTRFS_EXTENT_ITEM_KEY;
2120                 key.offset = node->num_bytes;
2121         }
2122
2123 again:
2124         path->reada = 1;
2125         path->leave_spinning = 1;
2126         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2127                                 path, 0, 1);
2128         if (ret < 0) {
2129                 err = ret;
2130                 goto out;
2131         }
2132         if (ret > 0) {
2133                 if (metadata) {
2134                         if (path->slots[0] > 0) {
2135                                 path->slots[0]--;
2136                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2137                                                       path->slots[0]);
2138                                 if (key.objectid == node->bytenr &&
2139                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2140                                     key.offset == node->num_bytes)
2141                                         ret = 0;
2142                         }
2143                         if (ret > 0) {
2144                                 btrfs_release_path(path);
2145                                 metadata = 0;
2146
2147                                 key.objectid = node->bytenr;
2148                                 key.offset = node->num_bytes;
2149                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2150                                 goto again;
2151                         }
2152                 } else {
2153                         err = -EIO;
2154                         goto out;
2155                 }
2156         }
2157
2158         leaf = path->nodes[0];
2159         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2160 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2161         if (item_size < sizeof(*ei)) {
2162                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2163                                              path, (u64)-1, 0);
2164                 if (ret < 0) {
2165                         err = ret;
2166                         goto out;
2167                 }
2168                 leaf = path->nodes[0];
2169                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2170         }
2171 #endif
2172         BUG_ON(item_size < sizeof(*ei));
2173         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2174         __run_delayed_extent_op(extent_op, leaf, ei);
2175
2176         btrfs_mark_buffer_dirty(leaf);
2177 out:
2178         btrfs_free_path(path);
2179         return err;
2180 }
2181
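     /*
      * Process a single delayed tree block ref; the metadata counterpart
      * of run_delayed_data_ref() above.
      */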
2182 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2183                                 struct btrfs_root *root,
2184                                 struct btrfs_delayed_ref_node *node,
2185                                 struct btrfs_delayed_extent_op *extent_op,
2186                                 int insert_reserved)
2187 {
2188         int ret = 0;
2189         struct btrfs_delayed_tree_ref *ref;
2190         struct btrfs_key ins;
2191         u64 parent = 0;
2192         u64 ref_root = 0;
2193         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2194                                                  SKINNY_METADATA);
2195
2196         ref = btrfs_delayed_node_to_tree_ref(node);
2197         trace_run_delayed_tree_ref(node, ref, node->action);
2198
2199         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2200                 parent = ref->parent;
2201         else
2202                 ref_root = ref->root;
2203
2204         ins.objectid = node->bytenr;
2205         if (skinny_metadata) {
2206                 ins.offset = ref->level;
2207                 ins.type = BTRFS_METADATA_ITEM_KEY;
2208         } else {
2209                 ins.offset = node->num_bytes;
2210                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2211         }
2212
2213         BUG_ON(node->ref_mod != 1);
2214         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2215                 BUG_ON(!extent_op || !extent_op->update_flags);
2216                 ret = alloc_reserved_tree_block(trans, root,
2217                                                 parent, ref_root,
2218                                                 extent_op->flags_to_set,
2219                                                 &extent_op->key,
2220                                                 ref->level, &ins);
2221         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2222                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2223                                              node->num_bytes, parent, ref_root,
2224                                              ref->level, 0, 1, extent_op);
2225         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2226                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2227                                           node->num_bytes, parent, ref_root,
2228                                           ref->level, 0, 1, extent_op);
2229         } else {
2230                 BUG();
2231         }
2232         return ret;
2233 }
2234
2235 /* helper function to actually process a single delayed ref entry */
2236 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2237                                struct btrfs_root *root,
2238                                struct btrfs_delayed_ref_node *node,
2239                                struct btrfs_delayed_extent_op *extent_op,
2240                                int insert_reserved)
2241 {
2242         int ret = 0;
2243
2244         if (trans->aborted) {
2245                 if (insert_reserved)
2246                         btrfs_pin_extent(root, node->bytenr,
2247                                          node->num_bytes, 1);
2248                 return 0;
2249         }
2250
2251         if (btrfs_delayed_ref_is_head(node)) {
2252                 struct btrfs_delayed_ref_head *head;
2253                 /*
2254                  * we've hit the end of the chain and we were supposed
2255                  * to insert this extent into the tree.  But it got
2256                  * deleted before we ever needed to insert it, so all
2257                  * we have to do is clean up the accounting
2258                  */
2259                 BUG_ON(extent_op);
2260                 head = btrfs_delayed_node_to_head(node);
2261                 trace_run_delayed_ref_head(node, head, node->action);
2262
2263                 if (insert_reserved) {
2264                         btrfs_pin_extent(root, node->bytenr,
2265                                          node->num_bytes, 1);
2266                         if (head->is_data) {
2267                                 ret = btrfs_del_csums(trans, root,
2268                                                       node->bytenr,
2269                                                       node->num_bytes);
2270                         }
2271                 }
2272                 return ret;
2273         }
2274
2275         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2276             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2277                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2278                                            insert_reserved);
2279         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2280                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2281                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2282                                            insert_reserved);
2283         else
2284                 BUG();
2285         return ret;
2286 }
2287
2288 static noinline struct btrfs_delayed_ref_node *
2289 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2290 {
2291         struct rb_node *node;
2292         struct btrfs_delayed_ref_node *ref, *last = NULL;
2293
2294         /*
2295          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2296          * This prevents the ref count from going down to zero while
2297          * there are still pending delayed refs.
2298          */
2299         node = rb_first(&head->ref_root);
2300         while (node) {
2301                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2302                                 rb_node);
2303                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2304                         return ref;
2305                 else if (last == NULL)
2306                         last = ref;
2307                 node = rb_next(node);
2308         }
2309         return last;
2310 }
2311
2312 /*
2313  * Returns 0 on success or if called with an already aborted transaction.
2314  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2315  */
2316 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2317                                              struct btrfs_root *root,
2318                                              unsigned long nr)
2319 {
2320         struct btrfs_delayed_ref_root *delayed_refs;
2321         struct btrfs_delayed_ref_node *ref;
2322         struct btrfs_delayed_ref_head *locked_ref = NULL;
2323         struct btrfs_delayed_extent_op *extent_op;
2324         struct btrfs_fs_info *fs_info = root->fs_info;
2325         ktime_t start = ktime_get();
2326         int ret;
2327         unsigned long count = 0;
2328         unsigned long actual_count = 0;
2329         int must_insert_reserved = 0;
2330
2331         delayed_refs = &trans->transaction->delayed_refs;
2332         while (1) {
2333                 if (!locked_ref) {
2334                         if (count >= nr)
2335                                 break;
2336
2337                         spin_lock(&delayed_refs->lock);
2338                         locked_ref = btrfs_select_ref_head(trans);
2339                         if (!locked_ref) {
2340                                 spin_unlock(&delayed_refs->lock);
2341                                 break;
2342                         }
2343
2344                         /* grab the lock that says we are going to process
2345                          * all the refs for this head */
2346                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2347                         spin_unlock(&delayed_refs->lock);
2348                         /*
2349                          * we may have dropped the spin lock to get the head
2350                          * mutex lock, and that might have given someone else
2351                          * time to free the head.  If that's true, it has been
2352                          * removed from our list and we can move on.
2353                          */
2354                         if (ret == -EAGAIN) {
2355                                 locked_ref = NULL;
2356                                 count++;
2357                                 continue;
2358                         }
2359                 }
2360
2361                 /*
2362                  * We need to try and merge add/drops of the same ref since we
2363                  * can run into issues with relocate dropping the implicit ref
2364                  * and then it being added back again before the drop can
2365                  * finish.  If we merged anything we need to re-loop so we can
2366                  * get a good ref.
2367                  */
2368                 spin_lock(&locked_ref->lock);
2369                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2370                                          locked_ref);
2371
2372                 /*
2373                  * locked_ref is the head node, so we have to go one
2374                  * node back for any delayed ref updates
2375                  */
2376                 ref = select_delayed_ref(locked_ref);
2377
2378                 if (ref && ref->seq &&
2379                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2380                         spin_unlock(&locked_ref->lock);
2381                         btrfs_delayed_ref_unlock(locked_ref);
2382                         spin_lock(&delayed_refs->lock);
2383                         locked_ref->processing = 0;
2384                         delayed_refs->num_heads_ready++;
2385                         spin_unlock(&delayed_refs->lock);
2386                         locked_ref = NULL;
2387                         cond_resched();
2388                         count++;
2389                         continue;
2390                 }
2391
2392                 /*
2393                  * record the must insert reserved flag before we
2394                  * drop the spin lock.
2395                  */
2396                 must_insert_reserved = locked_ref->must_insert_reserved;
2397                 locked_ref->must_insert_reserved = 0;
2398
2399                 extent_op = locked_ref->extent_op;
2400                 locked_ref->extent_op = NULL;
2401
2402                 if (!ref) {
2405                         /* All delayed refs have been processed; go ahead
2406                          * and send the head node to run_one_delayed_ref
2407                          * so that any accounting fixes can happen.
2408                          */
2409                         ref = &locked_ref->node;
2410
2411                         if (extent_op && must_insert_reserved) {
2412                                 btrfs_free_delayed_extent_op(extent_op);
2413                                 extent_op = NULL;
2414                         }
2415
2416                         if (extent_op) {
2417                                 spin_unlock(&locked_ref->lock);
2418                                 ret = run_delayed_extent_op(trans, root,
2419                                                             ref, extent_op);
2420                                 btrfs_free_delayed_extent_op(extent_op);
2421
2422                                 if (ret) {
2423                                         /*
2424                                          * Need to reset must_insert_reserved if
2425                                          * there was an error so the abort stuff
2426                                          * can cleanup the reserved space
2427                                          * properly.
2428                                          */
2429                                         if (must_insert_reserved)
2430                                                 locked_ref->must_insert_reserved = 1;
2431                                         locked_ref->processing = 0;
2432                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2433                                         btrfs_delayed_ref_unlock(locked_ref);
2434                                         return ret;
2435                                 }
2436                                 continue;
2437                         }
2438
2439                         /*
2440                          * Need to drop our head ref lock and re-acquire the
2441                          * delayed ref lock and then re-check to make sure
2442                          * nobody got added.
2443                          */
2444                         spin_unlock(&locked_ref->lock);
2445                         spin_lock(&delayed_refs->lock);
2446                         spin_lock(&locked_ref->lock);
2447                         if (rb_first(&locked_ref->ref_root)) {
2448                                 spin_unlock(&locked_ref->lock);
2449                                 spin_unlock(&delayed_refs->lock);
2450                                 continue;
2451                         }
2452                         ref->in_tree = 0;
2453                         delayed_refs->num_heads--;
2454                         rb_erase(&locked_ref->href_node,
2455                                  &delayed_refs->href_root);
2456                         spin_unlock(&delayed_refs->lock);
2457                 } else {
2458                         actual_count++;
2459                         ref->in_tree = 0;
2460                         rb_erase(&ref->rb_node, &locked_ref->ref_root);
2461                 }
2462                 atomic_dec(&delayed_refs->num_entries);
2463
2464                 if (!btrfs_delayed_ref_is_head(ref)) {
2465                         /*
2466                          * when we play the delayed ref, also correct the
2467                          * ref_mod on head
2468                          */
2469                         switch (ref->action) {
2470                         case BTRFS_ADD_DELAYED_REF:
2471                         case BTRFS_ADD_DELAYED_EXTENT:
2472                                 locked_ref->node.ref_mod -= ref->ref_mod;
2473                                 break;
2474                         case BTRFS_DROP_DELAYED_REF:
2475                                 locked_ref->node.ref_mod += ref->ref_mod;
2476                                 break;
2477                         default:
2478                                 WARN_ON(1);
2479                         }
2480                 }
2481                 spin_unlock(&locked_ref->lock);
2482
2483                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2484                                           must_insert_reserved);
2485
2486                 btrfs_free_delayed_extent_op(extent_op);
2487                 if (ret) {
2488                         locked_ref->processing = 0;
2489                         btrfs_delayed_ref_unlock(locked_ref);
2490                         btrfs_put_delayed_ref(ref);
2491                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2492                         return ret;
2493                 }
2494
2495                 /*
2496                  * If this node is a head, that means all the refs in this head
2497                  * have been dealt with, and we will pick the next head to deal
2498                  * with, so we must unlock the head and drop it from the cluster
2499                  * list before we release it.
2500                  */
2501                 if (btrfs_delayed_ref_is_head(ref)) {
2502                         btrfs_delayed_ref_unlock(locked_ref);
2503                         locked_ref = NULL;
2504                 }
2505                 btrfs_put_delayed_ref(ref);
2506                 count++;
2507                 cond_resched();
2508         }
2509
2510         /*
2511          * We don't want to include ref heads since we can have empty ref
2512          * heads, and those would drastically skew our runtime down because
2513          * we just do accounting, not actual extent tree updates.
2514          */
2515         if (actual_count > 0) {
2516                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2517                 u64 avg;
2518
2519                 /*
2520                  * We weigh the current average higher than our current runtime
2521                  * to avoid large swings in the average.
2522                  */
2523                 spin_lock(&delayed_refs->lock);
2524                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2525                 avg = div64_u64(avg, 4);
2526                 fs_info->avg_delayed_ref_runtime = avg;
2527                 spin_unlock(&delayed_refs->lock);
2528         }
2529         return 0;
2530 }
2531
2532 #ifdef SCRAMBLE_DELAYED_REFS
2533 /*
2534  * Normally delayed refs get processed in ascending bytenr order. This
2535  * correlates in most cases to the order added. To expose dependencies on this
2536  * order, we start to process the tree in the middle instead of the beginning.
2537  */
2538 static u64 find_middle(struct rb_root *root)
2539 {
2540         struct rb_node *n = root->rb_node;
2541         struct btrfs_delayed_ref_node *entry;
2542         int alt = 1;
2543         u64 middle;
2544         u64 first = 0, last = 0;
2545
2546         n = rb_first(root);
2547         if (n) {
2548                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2549                 first = entry->bytenr;
2550         }
2551         n = rb_last(root);
2552         if (n) {
2553                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2554                 last = entry->bytenr;
2555         }
2556         n = root->rb_node;
2557
2558         while (n) {
2559                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2560                 WARN_ON(!entry->in_tree);
2561
2562                 middle = entry->bytenr;
2563
2564                 if (alt)
2565                         n = n->rb_left;
2566                 else
2567                         n = n->rb_right;
2568
2569                 alt = 1 - alt;
2570         }
2571         return middle;
2572 }
2573 #endif
2574
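     /*
      * Flush the qgroup updates queued on this transaction, accounting
      * each one in turn, then drop the transaction's tree mod seq
      * element.
      */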
2575 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2576                                          struct btrfs_fs_info *fs_info)
2577 {
2578         struct qgroup_update *qgroup_update;
2579         int ret = 0;
2580
2581         if (list_empty(&trans->qgroup_ref_list) !=
2582             !trans->delayed_ref_elem.seq) {
2583                 /* list without seq or seq without list */
2584                 btrfs_err(fs_info,
2585                         "qgroup accounting update error, list is%s empty, seq is %#x.%x",
2586                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2587                         (u32)(trans->delayed_ref_elem.seq >> 32),
2588                         (u32)trans->delayed_ref_elem.seq);
2589                 BUG();
2590         }
2591
2592         if (!trans->delayed_ref_elem.seq)
2593                 return 0;
2594
2595         while (!list_empty(&trans->qgroup_ref_list)) {
2596                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2597                                                  struct qgroup_update, list);
2598                 list_del(&qgroup_update->list);
2599                 if (!ret)
2600                         ret = btrfs_qgroup_account_ref(
2601                                         trans, fs_info, qgroup_update->node,
2602                                         qgroup_update->extent_op);
2603                 kfree(qgroup_update);
2604         }
2605
2606         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2607
2608         return ret;
2609 }
2610
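     /*
      * Rough estimate of how many extent tree leaves the given number of
      * delayed ref heads may touch, assuming one extent item plus one
      * inline ref (plus a tree block info on non-skinny filesystems) per
      * head.
      */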
2611 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2612 {
2613         u64 num_bytes;
2614
2615         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2616                              sizeof(struct btrfs_extent_inline_ref));
2617         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2618                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2619
2620         /*
2621          * We don't ever fill up leaves all the way, so callers scale this
2622          * estimate up (see the <<= 1 in btrfs_check_space_for_delayed_refs())
2623          * to get closer to what we're really going to want to use.
2624          */
2624         return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2625 }
2626
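     /*
      * Returns 1 when the global reserve no longer covers a doubled
      * worst-case estimate for the pending delayed ref heads (doubled
      * again when no more chunks can be allocated), 0 otherwise.
      */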
2627 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2628                                        struct btrfs_root *root)
2629 {
2630         struct btrfs_block_rsv *global_rsv;
2631         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2632         u64 num_bytes;
2633         int ret = 0;
2634
2635         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2636         num_heads = heads_to_leaves(root, num_heads);
2637         if (num_heads > 1)
2638                 num_bytes += (num_heads - 1) * root->leafsize;
2639         num_bytes <<= 1;
2640         global_rsv = &root->fs_info->global_block_rsv;
2641
2642         /*
2643          * If we can't allocate any more chunks lets make sure we have _lots_ of
2644          * wiggle room since running delayed refs can create more delayed refs.
2645          */
2646         if (global_rsv->space_info->full)
2647                 num_bytes <<= 1;
2648
2649         spin_lock(&global_rsv->lock);
2650         if (global_rsv->reserved <= num_bytes)
2651                 ret = 1;
2652         spin_unlock(&global_rsv->lock);
2653         return ret;
2654 }
2655
2656 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2657                                        struct btrfs_root *root)
2658 {
2659         struct btrfs_fs_info *fs_info = root->fs_info;
2660         u64 num_entries =
2661                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2662         u64 avg_runtime;
2663
2664         smp_mb();
2665         avg_runtime = fs_info->avg_delayed_ref_runtime;
2666         if (num_entries * avg_runtime >= NSEC_PER_SEC)
2667                 return 1;
2668
2669         return btrfs_check_space_for_delayed_refs(trans, root);
2670 }
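
/*
 * Example of the throttle check above: with an average per-ref runtime
 * of 1,000,000ns (1ms), 1000 or more queued entries are expected to
 * take at least a full second to run, so we ask the caller to throttle.
 */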
2671
2672 /*
2673  * this starts processing the delayed reference count updates and
2674  * extent insertions we have queued up so far.  count can be
2675  * 0, which means to process everything in the tree at the start
2676  * of the run (but not newly added entries), or it can be some target
2677  * number you'd like to process.
2678  *
2679  * Returns 0 on success or if called with an aborted transaction
2680  * Returns <0 on error and aborts the transaction
2681  */
2682 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2683                            struct btrfs_root *root, unsigned long count)
2684 {
2685         struct rb_node *node;
2686         struct btrfs_delayed_ref_root *delayed_refs;
2687         struct btrfs_delayed_ref_head *head;
2688         int ret;
2689         int run_all = count == (unsigned long)-1;
2690         int run_most = 0;
2691
2692         /* We'll clean this up in btrfs_cleanup_transaction */
2693         if (trans->aborted)
2694                 return 0;
2695
2696         if (root == root->fs_info->extent_root)
2697                 root = root->fs_info->tree_root;
2698
2699         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2700
2701         delayed_refs = &trans->transaction->delayed_refs;
2702         if (count == 0) {
2703                 count = atomic_read(&delayed_refs->num_entries) * 2;
2704                 run_most = 1;
2705         }
2706
2707 again:
2708 #ifdef SCRAMBLE_DELAYED_REFS
2709         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2710 #endif
2711         ret = __btrfs_run_delayed_refs(trans, root, count);
2712         if (ret < 0) {
2713                 btrfs_abort_transaction(trans, root, ret);
2714                 return ret;
2715         }
2716
2717         if (run_all) {
2718                 if (!list_empty(&trans->new_bgs))
2719                         btrfs_create_pending_block_groups(trans, root);
2720
2721                 spin_lock(&delayed_refs->lock);
2722                 node = rb_first(&delayed_refs->href_root);
2723                 if (!node) {
2724                         spin_unlock(&delayed_refs->lock);
2725                         goto out;
2726                 }
2727                 count = (unsigned long)-1;
2728
2729                 while (node) {
2730                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2731                                         href_node);
2732                         if (btrfs_delayed_ref_is_head(&head->node)) {
2733                                 struct btrfs_delayed_ref_node *ref;
2734
2735                                 ref = &head->node;
2736                                 atomic_inc(&ref->refs);
2737
2738                                 spin_unlock(&delayed_refs->lock);
2739                                 /*
2740                                  * Mutex was contended, block until it's
2741                                  * released and try again
2742                                  */
2743                                 mutex_lock(&head->mutex);
2744                                 mutex_unlock(&head->mutex);
2745
2746                                 btrfs_put_delayed_ref(ref);
2747                                 cond_resched();
2748                                 goto again;
2749                         } else {
2750                                 WARN_ON(1);
2751                         }
2752                         node = rb_next(node);
2753                 }
2754                 spin_unlock(&delayed_refs->lock);
2755                 cond_resched();
2756                 goto again;
2757         }
2758 out:
2759         assert_qgroups_uptodate(trans);
2760         return 0;
2761 }
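
#if 0	/* Illustrative sketch only, not compiled. */
/*
 * Typical use at commit time: a count of (unsigned long)-1 asks
 * btrfs_run_delayed_refs() to keep going until the ref tree is empty,
 * including refs queued while it runs.  The wrapper name is made up
 * for this sketch.
 */
static int example_flush_all_delayed_refs(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root)
{
        return btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
}
#endif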
2762
2763 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2764                                 struct btrfs_root *root,
2765                                 u64 bytenr, u64 num_bytes, u64 flags,
2766                                 int level, int is_data)
2767 {
2768         struct btrfs_delayed_extent_op *extent_op;
2769         int ret;
2770
2771         extent_op = btrfs_alloc_delayed_extent_op();
2772         if (!extent_op)
2773                 return -ENOMEM;
2774
2775         extent_op->flags_to_set = flags;
2776         extent_op->update_flags = 1;
2777         extent_op->update_key = 0;
2778         extent_op->is_data = is_data ? 1 : 0;
2779         extent_op->level = level;
2780
2781         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2782                                           num_bytes, extent_op);
2783         if (ret)
2784                 btrfs_free_delayed_extent_op(extent_op);
2785         return ret;
2786 }
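
#if 0	/* Illustrative sketch only, not compiled. */
/*
 * Example: queue a delayed op that marks a tree block as carrying full
 * backrefs, similar to how the COW path flips this flag.  The wrapper
 * name is made up for this sketch.
 */
static int example_set_full_backref(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct extent_buffer *buf)
{
        return btrfs_set_disk_extent_flags(trans, root, buf->start, buf->len,
                                           BTRFS_BLOCK_FLAG_FULL_BACKREF,
                                           btrfs_header_level(buf), 0);
}
#endif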
2787
2788 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2789                                       struct btrfs_root *root,
2790                                       struct btrfs_path *path,
2791                                       u64 objectid, u64 offset, u64 bytenr)
2792 {
2793         struct btrfs_delayed_ref_head *head;
2794         struct btrfs_delayed_ref_node *ref;
2795         struct btrfs_delayed_data_ref *data_ref;
2796         struct btrfs_delayed_ref_root *delayed_refs;
2797         struct rb_node *node;
2798         int ret = 0;
2799
2800         delayed_refs = &trans->transaction->delayed_refs;
2801         spin_lock(&delayed_refs->lock);
2802         head = btrfs_find_delayed_ref_head(trans, bytenr);
2803         if (!head) {
2804                 spin_unlock(&delayed_refs->lock);
2805                 return 0;
2806         }
2807
2808         if (!mutex_trylock(&head->mutex)) {
2809                 atomic_inc(&head->node.refs);
2810                 spin_unlock(&delayed_refs->lock);
2811
2812                 btrfs_release_path(path);
2813
2814                 /*
2815                  * Mutex was contended, block until it's released and let
2816                  * caller try again
2817                  */
2818                 mutex_lock(&head->mutex);
2819                 mutex_unlock(&head->mutex);
2820                 btrfs_put_delayed_ref(&head->node);
2821                 return -EAGAIN;
2822         }
2823         spin_unlock(&delayed_refs->lock);
2824
2825         spin_lock(&head->lock);
2826         node = rb_first(&head->ref_root);
2827         while (node) {
2828                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2829                 node = rb_next(node);
2830
2831                 /* If it's a shared ref we know a cross reference exists */
2832                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2833                         ret = 1;
2834                         break;
2835                 }
2836
2837                 data_ref = btrfs_delayed_node_to_data_ref(ref);
2838
2839                 /*
2840                  * If our ref doesn't match the one we're currently looking at
2841                  * then we have a cross reference.
2842                  */
2843                 if (data_ref->root != root->root_key.objectid ||
2844                     data_ref->objectid != objectid ||
2845                     data_ref->offset != offset) {
2846                         ret = 1;
2847                         break;
2848                 }
2849         }
2850         spin_unlock(&head->lock);
2851         mutex_unlock(&head->mutex);
2852         return ret;
2853 }
2854
2855 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2856                                         struct btrfs_root *root,
2857                                         struct btrfs_path *path,
2858                                         u64 objectid, u64 offset, u64 bytenr)
2859 {
2860         struct btrfs_root *extent_root = root->fs_info->extent_root;
2861         struct extent_buffer *leaf;
2862         struct btrfs_extent_data_ref *ref;
2863         struct btrfs_extent_inline_ref *iref;
2864         struct btrfs_extent_item *ei;
2865         struct btrfs_key key;
2866         u32 item_size;
2867         int ret;
2868
2869         key.objectid = bytenr;
2870         key.offset = (u64)-1;
2871         key.type = BTRFS_EXTENT_ITEM_KEY;
2872
2873         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2874         if (ret < 0)
2875                 goto out;
2876         BUG_ON(ret == 0); /* Corruption */
2877
2878         ret = -ENOENT;
2879         if (path->slots[0] == 0)
2880                 goto out;
2881
2882         path->slots[0]--;
2883         leaf = path->nodes[0];
2884         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2885
2886         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2887                 goto out;
2888
2889         ret = 1;
2890         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2891 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2892         if (item_size < sizeof(*ei)) {
2893                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2894                 goto out;
2895         }
2896 #endif
2897         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2898
2899         if (item_size != sizeof(*ei) +
2900             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2901                 goto out;
2902
2903         if (btrfs_extent_generation(leaf, ei) <=
2904             btrfs_root_last_snapshot(&root->root_item))
2905                 goto out;
2906
2907         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2908         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2909             BTRFS_EXTENT_DATA_REF_KEY)
2910                 goto out;
2911
2912         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2913         if (btrfs_extent_refs(leaf, ei) !=
2914             btrfs_extent_data_ref_count(leaf, ref) ||
2915             btrfs_extent_data_ref_root(leaf, ref) !=
2916             root->root_key.objectid ||
2917             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2918             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2919                 goto out;
2920
2921         ret = 0;
2922 out:
2923         return ret;
2924 }
2925
2926 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2927                           struct btrfs_root *root,
2928                           u64 objectid, u64 offset, u64 bytenr)
2929 {
2930         struct btrfs_path *path;
2931         int ret;
2932         int ret2;
2933
2934         path = btrfs_alloc_path();
2935         if (!path)
2936                 return -ENOENT;
2937
2938         do {
2939                 ret = check_committed_ref(trans, root, path, objectid,
2940                                           offset, bytenr);
2941                 if (ret && ret != -ENOENT)
2942                         goto out;
2943
2944                 ret2 = check_delayed_ref(trans, root, path, objectid,
2945                                          offset, bytenr);
2946         } while (ret2 == -EAGAIN);
2947
2948         if (ret2 && ret2 != -ENOENT) {
2949                 ret = ret2;
2950                 goto out;
2951         }
2952
2953         if (ret != -ENOENT || ret2 != -ENOENT)
2954                 ret = 0;
2955 out:
2956         btrfs_free_path(path);
2957         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2958                 WARN_ON(ret > 0);
2959         return ret;
2960 }
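
/*
 * Sketch of the contract above (variable names are made up): a caller
 * deciding whether it may nocow-write into an extent checks for cross
 * references first and falls back to COW when any exist (ret > 0) or
 * the check fails (ret < 0); ret == 0 means this root holds the only
 * reference.
 *
 *	ret = btrfs_cross_ref_exist(trans, root, ino, offset, bytenr);
 *	if (ret)
 *		goto cow;
 */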
2961
2962 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2963                            struct btrfs_root *root,
2964                            struct extent_buffer *buf,
2965                            int full_backref, int inc, int for_cow)
2966 {
2967         u64 bytenr;
2968         u64 num_bytes;
2969         u64 parent;
2970         u64 ref_root;
2971         u32 nritems;
2972         struct btrfs_key key;
2973         struct btrfs_file_extent_item *fi;
2974         int i;
2975         int level;
2976         int ret = 0;
2977         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2978                             u64, u64, u64, u64, u64, u64, int);
2979
2980         ref_root = btrfs_header_owner(buf);
2981         nritems = btrfs_header_nritems(buf);
2982         level = btrfs_header_level(buf);
2983
2984         if (!root->ref_cows && level == 0)
2985                 return 0;
2986
2987         if (inc)
2988                 process_func = btrfs_inc_extent_ref;
2989         else
2990                 process_func = btrfs_free_extent;
2991
2992         if (full_backref)
2993                 parent = buf->start;
2994         else
2995                 parent = 0;
2996
2997         for (i = 0; i < nritems; i++) {
2998                 if (level == 0) {
2999                         btrfs_item_key_to_cpu(buf, &key, i);
3000                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3001                                 continue;
3002                         fi = btrfs_item_ptr(buf, i,
3003                                             struct btrfs_file_extent_item);
3004                         if (btrfs_file_extent_type(buf, fi) ==
3005                             BTRFS_FILE_EXTENT_INLINE)
3006                                 continue;
3007                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3008                         if (bytenr == 0)
3009                                 continue;
3010
3011                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3012                         key.offset -= btrfs_file_extent_offset(buf, fi);
3013                         ret = process_func(trans, root, bytenr, num_bytes,
3014                                            parent, ref_root, key.objectid,
3015                                            key.offset, for_cow);
3016                         if (ret)
3017                                 goto fail;
3018                 } else {
3019                         bytenr = btrfs_node_blockptr(buf, i);
3020                         num_bytes = btrfs_level_size(root, level - 1);
3021                         ret = process_func(trans, root, bytenr, num_bytes,
3022                                            parent, ref_root, level - 1, 0,
3023                                            for_cow);
3024                         if (ret)
3025                                 goto fail;
3026                 }
3027         }
3028         return 0;
3029 fail:
3030         return ret;
3031 }
3032
3033 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3034                   struct extent_buffer *buf, int full_backref, int for_cow)
3035 {
3036         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
3037 }
3038
3039 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3040                   struct extent_buffer *buf, int full_backref, int for_cow)
3041 {
3042         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
3043 }
3044
3045 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3046                                  struct btrfs_root *root,
3047                                  struct btrfs_path *path,
3048                                  struct btrfs_block_group_cache *cache)
3049 {
3050         int ret;
3051         struct btrfs_root *extent_root = root->fs_info->extent_root;
3052         unsigned long bi;
3053         struct extent_buffer *leaf;
3054
3055         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3056         if (ret < 0)
3057                 goto fail;
3058         BUG_ON(ret); /* Corruption */
3059
3060         leaf = path->nodes[0];
3061         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3062         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3063         btrfs_mark_buffer_dirty(leaf);
3064         btrfs_release_path(path);
3065 fail:
3066         if (ret) {
3067                 btrfs_abort_transaction(trans, root, ret);
3068                 return ret;
3069         }
3070         return 0;
3071
3072 }
3073
3074 static struct btrfs_block_group_cache *
3075 next_block_group(struct btrfs_root *root,
3076                  struct btrfs_block_group_cache *cache)
3077 {
3078         struct rb_node *node;
3079         spin_lock(&root->fs_info->block_group_cache_lock);
3080         node = rb_next(&cache->cache_node);
3081         btrfs_put_block_group(cache);
3082         if (node) {
3083                 cache = rb_entry(node, struct btrfs_block_group_cache,
3084                                  cache_node);
3085                 btrfs_get_block_group(cache);
3086         } else
3087                 cache = NULL;
3088         spin_unlock(&root->fs_info->block_group_cache_lock);
3089         return cache;
3090 }
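
#if 0	/* Illustrative sketch only, not compiled. */
/*
 * The lookup-first/next pair above gives the standard iteration
 * pattern used by btrfs_write_dirty_block_groups() below: each step
 * drops the previous group's reference and grabs one on the next.
 * The function name is made up for this sketch.
 */
static void example_walk_block_groups(struct btrfs_root *root)
{
        struct btrfs_block_group_cache *cache;

        cache = btrfs_lookup_first_block_group(root->fs_info, 0);
        while (cache) {
                /* inspect cache here */
                cache = next_block_group(root, cache);
        }
}
#endif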
3091
3092 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3093                             struct btrfs_trans_handle *trans,
3094                             struct btrfs_path *path)
3095 {
3096         struct btrfs_root *root = block_group->fs_info->tree_root;
3097         struct inode *inode = NULL;
3098         u64 alloc_hint = 0;
3099         int dcs = BTRFS_DC_ERROR;
3100         int num_pages = 0;
3101         int retries = 0;
3102         int ret = 0;
3103
3104         /*
3105          * If this block group is smaller than 100 megs, don't bother caching the
3106          * block group.
3107          */
3108         if (block_group->key.offset < (100 * 1024 * 1024)) {
3109                 spin_lock(&block_group->lock);
3110                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3111                 spin_unlock(&block_group->lock);
3112                 return 0;
3113         }
3114
3115 again:
3116         inode = lookup_free_space_inode(root, block_group, path);
3117         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3118                 ret = PTR_ERR(inode);
3119                 btrfs_release_path(path);
3120                 goto out;
3121         }
3122
3123         if (IS_ERR(inode)) {
3124                 BUG_ON(retries);
3125                 retries++;
3126
3127                 if (block_group->ro)
3128                         goto out_free;
3129
3130                 ret = create_free_space_inode(root, trans, block_group, path);
3131                 if (ret)
3132                         goto out_free;
3133                 goto again;
3134         }
3135
3136         /* We've already set up this transaction, go ahead and exit */
3137         if (block_group->cache_generation == trans->transid &&
3138             i_size_read(inode)) {
3139                 dcs = BTRFS_DC_SETUP;
3140                 goto out_put;
3141         }
3142
3143         /*
3144          * We want to set the generation to 0 so that if anything goes wrong
3145          * from here on out we know not to trust this cache when we load up next
3146          * time.
3147          */
3148         BTRFS_I(inode)->generation = 0;
3149         ret = btrfs_update_inode(trans, root, inode);
3150         WARN_ON(ret);
3151
3152         if (i_size_read(inode) > 0) {
3153                 ret = btrfs_check_trunc_cache_free_space(root,
3154                                         &root->fs_info->global_block_rsv);
3155                 if (ret)
3156                         goto out_put;
3157
3158                 ret = btrfs_truncate_free_space_cache(root, trans, inode);
3159                 if (ret)
3160                         goto out_put;
3161         }
3162
3163         spin_lock(&block_group->lock);
3164         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3165             !btrfs_test_opt(root, SPACE_CACHE)) {
3166                 /*
3167                  * don't bother trying to write stuff out _if_
3168                  * a) we're not cached,
3169                  * b) we're mounted with the nospace_cache option.
3170                  */
3171                 dcs = BTRFS_DC_WRITTEN;
3172                 spin_unlock(&block_group->lock);
3173                 goto out_put;
3174         }
3175         spin_unlock(&block_group->lock);
3176
3177         /*
3178          * Try to preallocate enough space based on how big the block group is.
3179          * Keep in mind this has to include any pinned space which could end up
3180          * taking up quite a bit since it's not folded into the other space
3181          * cache.
3182          */
3183         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3184         if (!num_pages)
3185                 num_pages = 1;
3186
3187         num_pages *= 16;
3188         num_pages *= PAGE_CACHE_SIZE;
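        /*
         * e.g. a 1GiB block group: 1GiB / 256MiB = 4, times 16 is 64
         * pages, i.e. 256KiB of cache space with 4KiB pages.
         */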
3189
3190         ret = btrfs_check_data_free_space(inode, num_pages);
3191         if (ret)
3192                 goto out_put;
3193
3194         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3195                                               num_pages, num_pages,
3196                                               &alloc_hint);
3197         if (!ret)
3198                 dcs = BTRFS_DC_SETUP;
3199         btrfs_free_reserved_data_space(inode, num_pages);
3200
3201 out_put:
3202         iput(inode);
3203 out_free:
3204         btrfs_release_path(path);
3205 out:
3206         spin_lock(&block_group->lock);
3207         if (!ret && dcs == BTRFS_DC_SETUP)
3208                 block_group->cache_generation = trans->transid;
3209         block_group->disk_cache_state = dcs;
3210         spin_unlock(&block_group->lock);
3211
3212         return ret;
3213 }
3214
3215 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3216                                    struct btrfs_root *root)
3217 {
3218         struct btrfs_block_group_cache *cache;
3219         int err = 0;
3220         struct btrfs_path *path;
3221         u64 last = 0;
3222
3223         path = btrfs_alloc_path();
3224         if (!path)
3225                 return -ENOMEM;
3226
3227 again:
3228         while (1) {
3229                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3230                 while (cache) {
3231                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3232                                 break;
3233                         cache = next_block_group(root, cache);
3234                 }
3235                 if (!cache) {
3236                         if (last == 0)
3237                                 break;
3238                         last = 0;
3239                         continue;
3240                 }
3241                 err = cache_save_setup(cache, trans, path);
3242                 last = cache->key.objectid + cache->key.offset;
3243                 btrfs_put_block_group(cache);
3244         }
3245
3246         while (1) {
3247                 if (last == 0) {
3248                         err = btrfs_run_delayed_refs(trans, root,
3249                                                      (unsigned long)-1);
3250                         if (err) /* File system offline */
3251                                 goto out;
3252                 }
3253
3254                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3255                 while (cache) {
3256                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3257                                 btrfs_put_block_group(cache);
3258                                 goto again;
3259                         }
3260
3261                         if (cache->dirty)
3262                                 break;
3263                         cache = next_block_group(root, cache);
3264                 }
3265                 if (!cache) {
3266                         if (last == 0)
3267                                 break;
3268                         last = 0;
3269                         continue;
3270                 }
3271
3272                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3273                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3274                 cache->dirty = 0;
3275                 last = cache->key.objectid + cache->key.offset;
3276
3277                 err = write_one_cache_group(trans, root, path, cache);
3278                 btrfs_put_block_group(cache);
3279                 if (err) /* File system offline */
3280                         goto out;
3281         }
3282
3283         while (1) {
3284                 /*
3285                  * I don't think this is needed since we're just marking our
3286                  * preallocated extent as written, but it can't hurt just in
3287                  * case.
3288                  */
3289                 if (last == 0) {
3290                         err = btrfs_run_delayed_refs(trans, root,
3291                                                      (unsigned long)-1);
3292                         if (err) /* File system offline */
3293                                 goto out;
3294                 }
3295
3296                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3297                 while (cache) {
3298                         /*
3299                          * Really this shouldn't happen, but it could if we
3300                          * couldn't write the entire preallocated extent and
3301                          * splitting the extent resulted in a new block.
3302                          */
3303                         if (cache->dirty) {
3304                                 btrfs_put_block_group(cache);
3305                                 goto again;
3306                         }
3307                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3308                                 break;
3309                         cache = next_block_group(root, cache);
3310                 }
3311                 if (!cache) {
3312                         if (last == 0)
3313                                 break;
3314                         last = 0;
3315                         continue;
3316                 }
3317
3318                 err = btrfs_write_out_cache(root, trans, cache, path);
3319
3320                 /*
3321                  * If we didn't have an error then the cache state is still
3322                  * NEED_WRITE, so we can set it to WRITTEN.
3323                  */
3324                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3325                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3326                 last = cache->key.objectid + cache->key.offset;
3327                 btrfs_put_block_group(cache);
3328         }
3329 out:
3330
3331         btrfs_free_path(path);
3332         return err;
3333 }
3334
3335 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3336 {
3337         struct btrfs_block_group_cache *block_group;
3338         int readonly = 0;
3339
3340         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3341         if (!block_group || block_group->ro)
3342                 readonly = 1;
3343         if (block_group)
3344                 btrfs_put_block_group(block_group);
3345         return readonly;
3346 }
3347
3348 static const char *alloc_name(u64 flags)
3349 {
3350         switch (flags) {
3351         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3352                 return "mixed";
3353         case BTRFS_BLOCK_GROUP_METADATA:
3354                 return "metadata";
3355         case BTRFS_BLOCK_GROUP_DATA:
3356                 return "data";
3357         case BTRFS_BLOCK_GROUP_SYSTEM:
3358                 return "system";
3359         default:
3360                 WARN_ON(1);
3361                 return "invalid-combination";
3362         }
3363 }
3364
3365 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3366                              u64 total_bytes, u64 bytes_used,
3367                              struct btrfs_space_info **space_info)
3368 {
3369         struct btrfs_space_info *found;
3370         int i;
3371         int factor;
3372         int ret;
3373
3374         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3375                      BTRFS_BLOCK_GROUP_RAID10))
3376                 factor = 2;
3377         else
3378                 factor = 1;
3379
3380         found = __find_space_info(info, flags);
3381         if (found) {
3382                 spin_lock(&found->lock);
3383                 found->total_bytes += total_bytes;
3384                 found->disk_total += total_bytes * factor;
3385                 found->bytes_used += bytes_used;
3386                 found->disk_used += bytes_used * factor;
3387                 found->full = 0;
3388                 spin_unlock(&found->lock);
3389                 *space_info = found;
3390                 return 0;
3391         }
3392         found = kzalloc(sizeof(*found), GFP_NOFS);
3393         if (!found)
3394                 return -ENOMEM;
3395
3396         ret = percpu_counter_init(&found->total_bytes_pinned, 0);
3397         if (ret) {
3398                 kfree(found);
3399                 return ret;
3400         }
3401
3402         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
3403                 INIT_LIST_HEAD(&found->block_groups[i]);
3404                 kobject_init(&found->block_group_kobjs[i], &btrfs_raid_ktype);
3405         }
3406         init_rwsem(&found->groups_sem);
3407         spin_lock_init(&found->lock);
3408         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3409         found->total_bytes = total_bytes;
3410         found->disk_total = total_bytes * factor;
3411         found->bytes_used = bytes_used;
3412         found->disk_used = bytes_used * factor;
3413         found->bytes_pinned = 0;
3414         found->bytes_reserved = 0;
3415         found->bytes_readonly = 0;
3416         found->bytes_may_use = 0;
3417         found->full = 0;
3418         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3419         found->chunk_alloc = 0;
3420         found->flush = 0;
3421         init_waitqueue_head(&found->wait);
3422
3423         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3424                                     info->space_info_kobj, "%s",
3425                                     alloc_name(found->flags));
3426         if (ret) {
3427                 kfree(found);
3428                 return ret;
3429         }
3430
3431         *space_info = found;
3432         list_add_rcu(&found->list, &info->space_info);
3433         if (flags & BTRFS_BLOCK_GROUP_DATA)
3434                 info->data_sinfo = found;
3435
3436         return ret;
3437 }
3438
3439 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3440 {
3441         u64 extra_flags = chunk_to_extended(flags) &
3442                                 BTRFS_EXTENDED_PROFILE_MASK;
3443
3444         write_seqlock(&fs_info->profiles_lock);
3445         if (flags & BTRFS_BLOCK_GROUP_DATA)
3446                 fs_info->avail_data_alloc_bits |= extra_flags;
3447         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3448                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3449         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3450                 fs_info->avail_system_alloc_bits |= extra_flags;
3451         write_sequnlock(&fs_info->profiles_lock);
3452 }
3453
3454 /*
3455  * returns target flags in extended format or 0 if restripe for this
3456  * chunk_type is not in progress
3457  *
3458  * should be called with either volume_mutex or balance_lock held
3459  */
3460 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3461 {
3462         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3463         u64 target = 0;
3464
3465         if (!bctl)
3466                 return 0;
3467
3468         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3469             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3470                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3471         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3472                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3473                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3474         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3475                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3476                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3477         }
3478
3479         return target;
3480 }
3481
3482 /*
3483  * @flags: available profiles in extended format (see ctree.h)
3484  *
3485  * Returns reduced profile in chunk format.  If profile changing is in
3486  * progress (either running or paused) picks the target profile (if it's
3487  * already available), otherwise falls back to plain reducing.
3488  */
3489 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3490 {
3491         /*
3492          * we add in the count of missing devices because we want
3493          * to make sure that any RAID levels on a degraded FS
3494          * continue to be honored.
3495          */
3496         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3497                 root->fs_info->fs_devices->missing_devices;
3498         u64 target;
3499         u64 tmp;
3500
3501         /*
3502          * see if restripe for this chunk_type is in progress, if so
3503          * try to reduce to the target profile
3504          */
3505         spin_lock(&root->fs_info->balance_lock);
3506         target = get_restripe_target(root->fs_info, flags);
3507         if (target) {
3508                 /* pick target profile only if it's already available */
3509                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3510                         spin_unlock(&root->fs_info->balance_lock);
3511                         return extended_to_chunk(target);
3512                 }
3513         }
3514         spin_unlock(&root->fs_info->balance_lock);
3515
3516         /* First, mask out the RAID levels which aren't possible */
3517         if (num_devices == 1)
3518                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3519                            BTRFS_BLOCK_GROUP_RAID5);
3520         if (num_devices < 3)
3521                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3522         if (num_devices < 4)
3523                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3524
3525         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3526                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3527                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3528         flags &= ~tmp;
3529
3530         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3531                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3532         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3533                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3534         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3535                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3536         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3537                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3538         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3539                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3540
3541         return extended_to_chunk(flags | tmp);
3542 }
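
/*
 * Example of the reduction above: extended flags naming RAID6 and
 * RAID1 on a two-device fs first lose RAID6 (it needs at least three
 * devices); RAID1 then wins the priority chain, so the chunk-format
 * result is RAID1.
 */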
3543
3544 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3545 {
3546         unsigned seq;
3547
3548         do {
3549                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3550
3551                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3552                         flags |= root->fs_info->avail_data_alloc_bits;
3553                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3554                         flags |= root->fs_info->avail_system_alloc_bits;
3555                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3556                         flags |= root->fs_info->avail_metadata_alloc_bits;
3557         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3558
3559         return btrfs_reduce_alloc_profile(root, flags);
3560 }
3561
3562 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3563 {
3564         u64 flags;
3565         u64 ret;
3566
3567         if (data)
3568                 flags = BTRFS_BLOCK_GROUP_DATA;
3569         else if (root == root->fs_info->chunk_root)
3570                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3571         else
3572                 flags = BTRFS_BLOCK_GROUP_METADATA;
3573
3574         ret = get_alloc_profile(root, flags);
3575         return ret;
3576 }
3577
3578 /*
3579  * This will check the space that the inode allocates from to make sure we have
3580  * enough space for bytes.
3581  */
3582 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3583 {
3584         struct btrfs_space_info *data_sinfo;
3585         struct btrfs_root *root = BTRFS_I(inode)->root;
3586         struct btrfs_fs_info *fs_info = root->fs_info;
3587         u64 used;
3588         int ret = 0, committed = 0, alloc_chunk = 1;
3589
3590         /* make sure bytes are sectorsize aligned */
3591         bytes = ALIGN(bytes, root->sectorsize);
3592
3593         if (btrfs_is_free_space_inode(inode)) {
3594                 committed = 1;
3595                 ASSERT(current->journal_info);
3596         }
3597
3598         data_sinfo = fs_info->data_sinfo;
3599         if (!data_sinfo)
3600                 goto alloc;
3601
3602 again:
3603         /* make sure we have enough space to handle the data first */
3604         spin_lock(&data_sinfo->lock);
3605         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3606                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3607                 data_sinfo->bytes_may_use;
3608
3609         if (used + bytes > data_sinfo->total_bytes) {
3610                 struct btrfs_trans_handle *trans;
3611
3612                 /*
3613                  * if we don't have enough free bytes in this space then we need
3614                  * to alloc a new chunk.
3615                  */
3616                 if (!data_sinfo->full && alloc_chunk) {
3617                         u64 alloc_target;
3618
3619                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3620                         spin_unlock(&data_sinfo->lock);
3621 alloc:
3622                         alloc_target = btrfs_get_alloc_profile(root, 1);
3623                         /*
3624                          * It is ugly that we don't use the nolock join
3625                          * transaction for the free space inode case here.
3626                          * But it is safe because we only do the data space
3627                          * reservation for the free space cache in the
3628                          * transaction context; the common join transaction
3629                          * just increases the use count of the current
3630                          * transaction handle and doesn't try to acquire the
3631                          * trans_lock of the fs.
3632                          */
3633                         trans = btrfs_join_transaction(root);
3634                         if (IS_ERR(trans))
3635                                 return PTR_ERR(trans);
3636
3637                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3638                                              alloc_target,
3639                                              CHUNK_ALLOC_NO_FORCE);
3640                         btrfs_end_transaction(trans, root);
3641                         if (ret < 0) {
3642                                 if (ret != -ENOSPC)
3643                                         return ret;
3644                                 else
3645                                         goto commit_trans;
3646                         }
3647
3648                         if (!data_sinfo)
3649                                 data_sinfo = fs_info->data_sinfo;
3650
3651                         goto again;
3652                 }
3653
3654                 /*
3655                  * If we don't have enough pinned space to deal with this
3656                  * allocation don't bother committing the transaction.
3657                  */
3658                 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3659                                            bytes) < 0)
3660                         committed = 1;
3661                 spin_unlock(&data_sinfo->lock);
3662
3663                 /* commit the current transaction and try again */
3664 commit_trans:
3665                 if (!committed &&
3666                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3667                         committed = 1;
3668
3669                         trans = btrfs_join_transaction(root);
3670                         if (IS_ERR(trans))
3671                                 return PTR_ERR(trans);
3672                         ret = btrfs_commit_transaction(trans, root);
3673                         if (ret)
3674                                 return ret;
3675                         goto again;
3676                 }
3677
3678                 trace_btrfs_space_reservation(root->fs_info,
3679                                               "space_info:enospc",
3680                                               data_sinfo->flags, bytes, 1);
3681                 return -ENOSPC;
3682         }
3683         data_sinfo->bytes_may_use += bytes;
3684         trace_btrfs_space_reservation(root->fs_info, "space_info",
3685                                       data_sinfo->flags, bytes, 1);
3686         spin_unlock(&data_sinfo->lock);
3687
3688         return 0;
3689 }
3690
3691 /*
3692  * Called if we need to clear a data reservation for this inode.
3693  */
3694 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3695 {
3696         struct btrfs_root *root = BTRFS_I(inode)->root;
3697         struct btrfs_space_info *data_sinfo;
3698
3699         /* make sure bytes are sectorsize aligned */
3700         bytes = ALIGN(bytes, root->sectorsize);
3701
3702         data_sinfo = root->fs_info->data_sinfo;
3703         spin_lock(&data_sinfo->lock);
3704         WARN_ON(data_sinfo->bytes_may_use < bytes);
3705         data_sinfo->bytes_may_use -= bytes;
3706         trace_btrfs_space_reservation(root->fs_info, "space_info",
3707                                       data_sinfo->flags, bytes, 0);
3708         spin_unlock(&data_sinfo->lock);
3709 }
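
#if 0	/* Illustrative sketch only, not compiled. */
/*
 * The two helpers above pair up: a successful reservation must be
 * released if the bytes never become an allocated extent, as
 * cache_save_setup() does after its preallocation attempt.  The
 * function name is made up for this sketch.
 */
static int example_reserve_then_undo(struct inode *inode, u64 bytes)
{
        int ret;

        ret = btrfs_check_data_free_space(inode, bytes);
        if (ret)
                return ret;
        /* ... try the write or preallocation here ... */
        btrfs_free_reserved_data_space(inode, bytes);
        return 0;
}
#endif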
3710
3711 static void force_metadata_allocation(struct btrfs_fs_info *info)
3712 {
3713         struct list_head *head = &info->space_info;
3714         struct btrfs_space_info *found;
3715
3716         rcu_read_lock();
3717         list_for_each_entry_rcu(found, head, list) {
3718                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3719                         found->force_alloc = CHUNK_ALLOC_FORCE;
3720         }
3721         rcu_read_unlock();
3722 }
3723
3724 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3725 {
3726         return (global->size << 1);
3727 }
3728
3729 static int should_alloc_chunk(struct btrfs_root *root,
3730                               struct btrfs_space_info *sinfo, int force)
3731 {
3732         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3733         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3734         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3735         u64 thresh;
3736
3737         if (force == CHUNK_ALLOC_FORCE)
3738                 return 1;
3739
3740         /*
3741          * We need to take into account the global rsv because for all intents
3742          * and purposes it's used space.  Don't worry about locking the
3743          * global_rsv, it doesn't change except when the transaction commits.
3744          */
3745         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3746                 num_allocated += calc_global_rsv_need_space(global_rsv);
3747
3748         /*
3749          * in limited mode, we want to have some free space up to
3750          * about 1% of the FS size.
3751          */
3752         if (force == CHUNK_ALLOC_LIMITED) {
3753                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3754                 thresh = max_t(u64, 64 * 1024 * 1024,
3755                                div_factor_fine(thresh, 1));
3756
3757                 if (num_bytes - num_allocated < thresh)
3758                         return 1;
3759         }
3760
3761         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3762                 return 0;
3763         return 1;
3764 }
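
/*
 * Example of the limited-mode math above: on a 100GiB fs, 1% is 1GiB,
 * which beats the 64MiB floor, so a chunk is allocated whenever less
 * than 1GiB of this space info remains unallocated.
 */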
3765
3766 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3767 {
3768         u64 num_dev;
3769
3770         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3771                     BTRFS_BLOCK_GROUP_RAID0 |
3772                     BTRFS_BLOCK_GROUP_RAID5 |
3773                     BTRFS_BLOCK_GROUP_RAID6))
3774                 num_dev = root->fs_info->fs_devices->rw_devices;
3775         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3776                 num_dev = 2;
3777         else
3778                 num_dev = 1;    /* DUP or single */
3779
3780         /* metadata for updating the devices and the chunk tree */
3781         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3782 }
3783
3784 static void check_system_chunk(struct btrfs_trans_handle *trans,
3785                                struct btrfs_root *root, u64 type)
3786 {
3787         struct btrfs_space_info *info;
3788         u64 left;
3789         u64 thresh;
3790
3791         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3792         spin_lock(&info->lock);
3793         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3794                 info->bytes_reserved - info->bytes_readonly;
3795         spin_unlock(&info->lock);
3796
3797         thresh = get_system_chunk_thresh(root, type);
3798         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3799                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3800                         left, thresh, type);
3801                 dump_space_info(info, 0, 0);
3802         }
3803
3804         if (left < thresh) {
3805                 u64 flags;
3806
3807                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3808                 btrfs_alloc_chunk(trans, root, flags);
3809         }
3810 }
3811
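/*
 * Returns 1 if a new chunk was allocated, 0 if no allocation was
 * needed, and a negative errno on failure; -ENOSPC is returned both
 * when a full space info still wants a chunk and when the allocation
 * itself runs out of room.
 */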
3812 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3813                           struct btrfs_root *extent_root, u64 flags, int force)
3814 {
3815         struct btrfs_space_info *space_info;
3816         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3817         int wait_for_alloc = 0;
3818         int ret = 0;
3819
3820         /* Don't re-enter if we're already allocating a chunk */
3821         if (trans->allocating_chunk)
3822                 return -ENOSPC;
3823
3824         space_info = __find_space_info(extent_root->fs_info, flags);
3825         if (!space_info) {
3826                 ret = update_space_info(extent_root->fs_info, flags,
3827                                         0, 0, &space_info);
3828                 BUG_ON(ret); /* -ENOMEM */
3829         }
3830         BUG_ON(!space_info); /* Logic error */
3831
3832 again:
3833         spin_lock(&space_info->lock);
3834         if (force < space_info->force_alloc)
3835                 force = space_info->force_alloc;
3836         if (space_info->full) {
3837                 if (should_alloc_chunk(extent_root, space_info, force))
3838                         ret = -ENOSPC;
3839                 else
3840                         ret = 0;
3841                 spin_unlock(&space_info->lock);
3842                 return ret;
3843         }
3844
3845         if (!should_alloc_chunk(extent_root, space_info, force)) {
3846                 spin_unlock(&space_info->lock);
3847                 return 0;
3848         } else if (space_info->chunk_alloc) {
3849                 wait_for_alloc = 1;
3850         } else {
3851                 space_info->chunk_alloc = 1;
3852         }
3853
3854         spin_unlock(&space_info->lock);
3855
3856         mutex_lock(&fs_info->chunk_mutex);
3857
3858         /*
3859          * The chunk_mutex is held throughout the entirety of a chunk
3860          * allocation, so once we've acquired the chunk_mutex we know that the
3861          * other guy is done and we need to recheck and see if we should
3862          * allocate.
3863          */
3864         if (wait_for_alloc) {
3865                 mutex_unlock(&fs_info->chunk_mutex);
3866                 wait_for_alloc = 0;
3867                 goto again;
3868         }
3869
3870         trans->allocating_chunk = true;
3871
3872         /*
3873          * If we have mixed data/metadata chunks we want to make sure we keep
3874          * allocating mixed chunks instead of individual chunks.
3875          */
3876         if (btrfs_mixed_space_info(space_info))
3877                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3878
3879         /*
3880          * if we're doing a data chunk, go ahead and make sure that
3881          * we keep a reasonable number of metadata chunks allocated in the
3882          * FS as well.
3883          */
3884         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3885                 fs_info->data_chunk_allocations++;
3886                 if (!(fs_info->data_chunk_allocations %
3887                       fs_info->metadata_ratio))
3888                         force_metadata_allocation(fs_info);
3889         }
3890
3891         /*
3892          * Check if we have enough space in SYSTEM chunk because we may need
3893          * to update devices.
3894          */
3895         check_system_chunk(trans, extent_root, flags);
3896
3897         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3898         trans->allocating_chunk = false;
3899
3900         spin_lock(&space_info->lock);
3901         if (ret < 0 && ret != -ENOSPC)
3902                 goto out;
3903         if (ret)
3904                 space_info->full = 1;
3905         else
3906                 ret = 1;
3907
3908         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3909 out:
3910         space_info->chunk_alloc = 0;
3911         spin_unlock(&space_info->lock);
3912         mutex_unlock(&fs_info->chunk_mutex);
3913         return ret;
3914 }
3915
3916 static int can_overcommit(struct btrfs_root *root,
3917                           struct btrfs_space_info *space_info, u64 bytes,
3918                           enum btrfs_reserve_flush_enum flush)
3919 {
3920         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3921         u64 profile = btrfs_get_alloc_profile(root, 0);
3922         u64 space_size;
3923         u64 avail;
3924         u64 used;
3925
3926         used = space_info->bytes_used + space_info->bytes_reserved +
3927                 space_info->bytes_pinned + space_info->bytes_readonly;
3928
3929         /*
3930          * We only want to allow over committing if we have lots of actual space
3931          * free, but if we don't have enough space to handle the global reserve
3932          * space then we could end up having a real enospc problem when trying
3933          * to allocate a chunk or some other such important allocation.
3934          */
3935         spin_lock(&global_rsv->lock);
3936         space_size = calc_global_rsv_need_space(global_rsv);
3937         spin_unlock(&global_rsv->lock);
3938         if (used + space_size >= space_info->total_bytes)
3939                 return 0;
3940
3941         used += space_info->bytes_may_use;
3942
3943         spin_lock(&root->fs_info->free_chunk_lock);
3944         avail = root->fs_info->free_chunk_space;
3945         spin_unlock(&root->fs_info->free_chunk_lock);
3946
3947         /*
3948          * If we have dup, raid1 or raid10 then only half of the free
3949          * space is actually usable.  For raid56, the space info used
3950          * doesn't include the parity drive, so we don't have to
3951          * change the math
3952          */
3953         if (profile & (BTRFS_BLOCK_GROUP_DUP |
3954                        BTRFS_BLOCK_GROUP_RAID1 |
3955                        BTRFS_BLOCK_GROUP_RAID10))
3956                 avail >>= 1;
3957
3958         /*
3959          * If we aren't flushing all things, allow overcommitting up to
3960          * half of the space.  If we can flush, don't let us overcommit
3961          * too much: only up to 1/8 of the space.
3962          */
3963         if (flush == BTRFS_RESERVE_FLUSH_ALL)
3964                 avail >>= 3;
3965         else
3966                 avail >>= 1;
3967
3968         if (used + bytes < space_info->total_bytes + avail)
3969                 return 1;
3970         return 0;
3971 }
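
/*
 * Example of the math above: with 8GiB of unallocated device space and
 * a RAID1 metadata profile, avail halves to 4GiB; a FLUSH_ALL caller
 * may then overcommit by up to 512MiB (a further 1/8th), a NO_FLUSH
 * caller by up to 2GiB (1/2).
 */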
3972
3973 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3974                                          unsigned long nr_pages)
3975 {
3976         struct super_block *sb = root->fs_info->sb;
3977
3978         if (down_read_trylock(&sb->s_umount)) {
3979                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
3980                 up_read(&sb->s_umount);
3981         } else {
3982                 /*
3983                  * We needn't worry about the filesystem going from r/w to
3984                  * r/o even though we don't acquire the ->s_umount mutex,
3985                  * because the filesystem should guarantee the delalloc
3986                  * inode list is empty after the filesystem turns read-only
3987                  * (all dirty pages have been written to disk).
3988                  */
3989                 btrfs_start_delalloc_roots(root->fs_info, 0);
3990                 if (!current->journal_info)
3991                         btrfs_wait_ordered_roots(root->fs_info, -1);
3992         }
3993 }
3994
3995 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
3996 {
3997         u64 bytes;
3998         int nr;
3999
4000         bytes = btrfs_calc_trans_metadata_size(root, 1);
4001         nr = (int)div64_u64(to_reclaim, bytes);
4002         if (!nr)
4003                 nr = 1;
4004         return nr;
4005 }
4006
4007 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
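
/*
 * Worked example tying the two together: with a per-item metadata cost
 * of, say, 256KiB, reclaiming 1MiB gives items = 4, which
 * shrink_delalloc() below rescales to 4 * EXTENT_SIZE_PER_ITEM = 1MiB
 * of delalloc to flush.
 */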
4008
4009 /*
4010  * shrink metadata reservation for delalloc
4011  */
4012 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4013                             bool wait_ordered)
4014 {
4015         struct btrfs_block_rsv *block_rsv;
4016         struct btrfs_space_info *space_info;
4017         struct btrfs_trans_handle *trans;
4018         u64 delalloc_bytes;
4019         u64 max_reclaim;
4020         long time_left;
4021         unsigned long nr_pages;
4022         int loops;
4023         int items;
4024         enum btrfs_reserve_flush_enum flush;
4025
4026         /* Calc the number of items we need to flush for the space reservation */
4027         items = calc_reclaim_items_nr(root, to_reclaim);
4028         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4029
4030         trans = (struct btrfs_trans_handle *)current->journal_info;
4031         block_rsv = &root->fs_info->delalloc_block_rsv;
4032         space_info = block_rsv->space_info;
4033
4034         delalloc_bytes = percpu_counter_sum_positive(
4035                                                 &root->fs_info->delalloc_bytes);
4036         if (delalloc_bytes == 0) {
4037                 if (trans)
4038                         return;
4039                 if (wait_ordered)
4040                         btrfs_wait_ordered_roots(root->fs_info, items);
4041                 return;
4042         }
4043
4044         loops = 0;
4045         while (delalloc_bytes && loops < 3) {
4046                 max_reclaim = min(delalloc_bytes, to_reclaim);
4047                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4048                 btrfs_writeback_inodes_sb_nr(root, nr_pages);
4049                 /*
4050                  * We need to wait for the async pages to actually start before
4051                  * we do anything.
4052                  */
4053                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4054                 if (!max_reclaim)
4055                         goto skip_async;
4056
4057                 if (max_reclaim <= nr_pages)
4058                         max_reclaim = 0;
4059                 else
4060                         max_reclaim -= nr_pages;
4061
4062                 wait_event(root->fs_info->async_submit_wait,
4063                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4064                            (int)max_reclaim);
4065 skip_async:
4066                 if (!trans)
4067                         flush = BTRFS_RESERVE_FLUSH_ALL;
4068                 else
4069                         flush = BTRFS_RESERVE_NO_FLUSH;
4070                 spin_lock(&space_info->lock);
4071                 if (can_overcommit(root, space_info, orig, flush)) {
4072                         spin_unlock(&space_info->lock);
4073                         break;
4074                 }
4075                 spin_unlock(&space_info->lock);
4076
4077                 loops++;
4078                 if (wait_ordered && !trans) {
4079                         btrfs_wait_ordered_roots(root->fs_info, items);
4080                 } else {
4081                         time_left = schedule_timeout_killable(1);
4082                         if (time_left)
4083                                 break;
4084                 }
4085                 delalloc_bytes = percpu_counter_sum_positive(
4086                                                 &root->fs_info->delalloc_bytes);
4087         }
4088 }
4089
4090 /**
4091  * may_commit_transaction - possibly commit the transaction if it's ok to
4092  * @root - the root we're allocating for
4093  * @bytes - the number of bytes we want to reserve
4094  * @force - force the commit
4095  *
4096  * This will check to make sure that committing the transaction will actually
4097  * get us somewhere and then commit the transaction if it does.  Otherwise it
4098  * will return -ENOSPC.
4099  */
4100 static int may_commit_transaction(struct btrfs_root *root,
4101                                   struct btrfs_space_info *space_info,
4102                                   u64 bytes, int force)
4103 {
4104         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4105         struct btrfs_trans_handle *trans;
4106
4107         trans = (struct btrfs_trans_handle *)current->journal_info;
4108         if (trans)
4109                 return -EAGAIN;
4110
4111         if (force)
4112                 goto commit;
4113
4114         /* See if there is enough pinned space to make this reservation */
4115         spin_lock(&space_info->lock);
4116         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4117                                    bytes) >= 0) {
4118                 spin_unlock(&space_info->lock);
4119                 goto commit;
4120         }
4121         spin_unlock(&space_info->lock);
4122
4123         /*
4124          * See if there is some space in the delayed insertion reservation for
4125          * this reservation.
4126          */
4127         if (space_info != delayed_rsv->space_info)
4128                 return -ENOSPC;
4129
4130         spin_lock(&space_info->lock);
4131         spin_lock(&delayed_rsv->lock);
4132         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4133                                    bytes - delayed_rsv->size) < 0) {
4134                 spin_unlock(&delayed_rsv->lock);
4135                 spin_unlock(&space_info->lock);
4136                 return -ENOSPC;
4137         }
4138         spin_unlock(&delayed_rsv->lock);
4139         spin_unlock(&space_info->lock);
4140
4141 commit:
4142         trans = btrfs_join_transaction(root);
4143         if (IS_ERR(trans))
4144                 return -ENOSPC;
4145
4146         return btrfs_commit_transaction(trans, root);
4147 }
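/*
 * The intent of the checks above: committing a transaction frees the
 * pinned bytes and flushes the delayed items, returning the delayed
 * insertion reservation to this space_info, so a commit is only worth
 * the cost when the pinned space plus delayed_rsv's size can cover the
 * requested bytes.
 */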
4148
4149 enum flush_state {
4150         FLUSH_DELAYED_ITEMS_NR  =       1,
4151         FLUSH_DELAYED_ITEMS     =       2,
4152         FLUSH_DELALLOC          =       3,
4153         FLUSH_DELALLOC_WAIT     =       4,
4154         ALLOC_CHUNK             =       5,
4155         COMMIT_TRANS            =       6,
4156 };
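/*
 * reserve_metadata_bytes() below walks these states in ascending order,
 * escalating from cheap measures (running a batch of delayed items) to
 * expensive ones (allocating a new chunk, committing the transaction)
 * until the reservation succeeds or the states are exhausted.
 */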
4157
4158 static int flush_space(struct btrfs_root *root,
4159                        struct btrfs_space_info *space_info, u64 num_bytes,
4160                        u64 orig_bytes, int state)
4161 {
4162         struct btrfs_trans_handle *trans;
4163         int nr;
4164         int ret = 0;
4165
4166         switch (state) {
4167         case FLUSH_DELAYED_ITEMS_NR:
4168         case FLUSH_DELAYED_ITEMS:
4169                 if (state == FLUSH_DELAYED_ITEMS_NR)
4170                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4171                 else
4172                         nr = -1;
4173
4174                 trans = btrfs_join_transaction(root);
4175                 if (IS_ERR(trans)) {
4176                         ret = PTR_ERR(trans);
4177                         break;
4178                 }
4179                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4180                 btrfs_end_transaction(trans, root);
4181                 break;
4182         case FLUSH_DELALLOC:
4183         case FLUSH_DELALLOC_WAIT:
4184                 shrink_delalloc(root, num_bytes, orig_bytes,
4185                                 state == FLUSH_DELALLOC_WAIT);
4186                 break;
4187         case ALLOC_CHUNK:
4188                 trans = btrfs_join_transaction(root);
4189                 if (IS_ERR(trans)) {
4190                         ret = PTR_ERR(trans);
4191                         break;
4192                 }
4193                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4194                                      btrfs_get_alloc_profile(root, 0),
4195                                      CHUNK_ALLOC_NO_FORCE);
4196                 btrfs_end_transaction(trans, root);
4197                 if (ret == -ENOSPC)
4198                         ret = 0;
4199                 break;
4200         case COMMIT_TRANS:
4201                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4202                 break;
4203         default:
4204                 ret = -ENOSPC;
4205                 break;
4206         }
4207
4208         return ret;
4209 }
4210 /**
4211  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4212  * @root - the root we're allocating for
4213  * @block_rsv - the block_rsv we're allocating for
4214  * @orig_bytes - the number of bytes we want
4215  * @flush - whether or not we can flush to make our reservation
4216  *
4217  * This will reserve orig_bytes number of bytes from the space info associated
4218  * with the block_rsv.  If there is not enough space it will make an attempt to
4219  * flush out space to make room.  It will do this by flushing delalloc if
4220  * possible or committing the transaction.  If flush is 0 then no attempts to
4221  * regain reservations will be made and this will fail if there is not enough
4222  * space already.
4223  */
4224 static int reserve_metadata_bytes(struct btrfs_root *root,
4225                                   struct btrfs_block_rsv *block_rsv,
4226                                   u64 orig_bytes,
4227                                   enum btrfs_reserve_flush_enum flush)
4228 {
4229         struct btrfs_space_info *space_info = block_rsv->space_info;
4230         u64 used;
4231         u64 num_bytes = orig_bytes;
4232         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4233         int ret = 0;
4234         bool flushing = false;
4235
4236 again:
4237         ret = 0;
4238         spin_lock(&space_info->lock);
4239         /*
4240          * We only want to wait if somebody other than us is flushing and we
4241          * are actually allowed to flush all things.
4242          */
4243         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4244                space_info->flush) {
4245                 spin_unlock(&space_info->lock);
4246                 /*
4247                  * If we have a trans handle we can't wait because the flusher
4248                  * may have to commit the transaction, which would mean we would
4249                  * deadlock since we are waiting for the flusher to finish, but
4250                  * hold the current transaction open.
4251                  */
4252                 if (current->journal_info)
4253                         return -EAGAIN;
4254                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4255                 /* Must have been killed, return */
4256                 if (ret)
4257                         return -EINTR;
4258
4259                 spin_lock(&space_info->lock);
4260         }
4261
4262         ret = -ENOSPC;
4263         used = space_info->bytes_used + space_info->bytes_reserved +
4264                 space_info->bytes_pinned + space_info->bytes_readonly +
4265                 space_info->bytes_may_use;
4266
4267         /*
4268          * The idea here is that if we haven't already over-reserved the
4269          * block group, we can go ahead and save our reservation first and
4270          * then start flushing if we need to.  Otherwise, if we've already
4271          * overcommitted, let's start flushing stuff first and then come
4272          * back and try to make our reservation.
4273          */
4274         if (used <= space_info->total_bytes) {
4275                 if (used + orig_bytes <= space_info->total_bytes) {
4276                         space_info->bytes_may_use += orig_bytes;
4277                         trace_btrfs_space_reservation(root->fs_info,
4278                                 "space_info", space_info->flags, orig_bytes, 1);
4279                         ret = 0;
4280                 } else {
4281                         /*
4282                          * Ok, set num_bytes to orig_bytes since we aren't
4283                          * overcommitted; this way we only try to reclaim what
4284                          * we need.
4285                          */
4286                         num_bytes = orig_bytes;
4287                 }
4288         } else {
4289                 /*
4290                  * Ok, we're overcommitted, set num_bytes to the overcommitted
4291                  * amount plus the amount of bytes that we need for this
4292                  * reservation.
4293                  */
4294                 num_bytes = used - space_info->total_bytes +
4295                         (orig_bytes * 2);
4296         }
4297
4298         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4299                 space_info->bytes_may_use += orig_bytes;
4300                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4301                                               space_info->flags, orig_bytes,
4302                                               1);
4303                 ret = 0;
4304         }
4305
4306         /*
4307          * Couldn't make our reservation, save our place so while we're trying
4308          * to reclaim space we can actually use it instead of somebody else
4309          * stealing it from us.
4310          *
4311          * We make the other tasks wait for the flush only when we can flush
4312          * all things.
4313          */
4314         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4315                 flushing = true;
4316                 space_info->flush = 1;
4317         }
4318
4319         spin_unlock(&space_info->lock);
4320
4321         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4322                 goto out;
4323
4324         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4325                           flush_state);
4326         flush_state++;
4327
4328         /*
4329          * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
4330          * could occur, so skip the delalloc flush states.
4331          */
4332         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4333             (flush_state == FLUSH_DELALLOC ||
4334              flush_state == FLUSH_DELALLOC_WAIT))
4335                 flush_state = ALLOC_CHUNK;
4336
4337         if (!ret)
4338                 goto again;
4339         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4340                  flush_state < COMMIT_TRANS)
4341                 goto again;
4342         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4343                  flush_state <= COMMIT_TRANS)
4344                 goto again;
4345
4346 out:
4347         if (ret == -ENOSPC &&
4348             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4349                 struct btrfs_block_rsv *global_rsv =
4350                         &root->fs_info->global_block_rsv;
4351
4352                 if (block_rsv != global_rsv &&
4353                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4354                         ret = 0;
4355         }
4356         if (ret == -ENOSPC)
4357                 trace_btrfs_space_reservation(root->fs_info,
4358                                               "space_info:enospc",
4359                                               space_info->flags, orig_bytes, 1);
4360         if (flushing) {
4361                 spin_lock(&space_info->lock);
4362                 space_info->flush = 0;
4363                 wake_up_all(&space_info->wait);
4364                 spin_unlock(&space_info->lock);
4365         }
4366         return ret;
4367 }
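/*
 * Typical usage (sketch): callers that are allowed to sleep and flush
 * typically ask for
 *
 *	ret = reserve_metadata_bytes(root, rsv, bytes,
 *				     BTRFS_RESERVE_FLUSH_ALL);
 *
 * much like btrfs_block_rsv_add() below, while contexts that must not
 * recurse into flushing pass BTRFS_RESERVE_NO_FLUSH and accept -ENOSPC
 * even when flushing might have found the space.
 */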
4368
4369 static struct btrfs_block_rsv *get_block_rsv(
4370                                         const struct btrfs_trans_handle *trans,
4371                                         const struct btrfs_root *root)
4372 {
4373         struct btrfs_block_rsv *block_rsv = NULL;
4374
4375         if (root->ref_cows)
4376                 block_rsv = trans->block_rsv;
4377
4378         if (root == root->fs_info->csum_root && trans->adding_csums)
4379                 block_rsv = trans->block_rsv;
4380
4381         if (root == root->fs_info->uuid_root)
4382                 block_rsv = trans->block_rsv;
4383
4384         if (!block_rsv)
4385                 block_rsv = root->block_rsv;
4386
4387         if (!block_rsv)
4388                 block_rsv = &root->fs_info->empty_block_rsv;
4389
4390         return block_rsv;
4391 }
4392
4393 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4394                                u64 num_bytes)
4395 {
4396         int ret = -ENOSPC;
4397         spin_lock(&block_rsv->lock);
4398         if (block_rsv->reserved >= num_bytes) {
4399                 block_rsv->reserved -= num_bytes;
4400                 if (block_rsv->reserved < block_rsv->size)
4401                         block_rsv->full = 0;
4402                 ret = 0;
4403         }
4404         spin_unlock(&block_rsv->lock);
4405         return ret;
4406 }
4407
4408 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4409                                 u64 num_bytes, int update_size)
4410 {
4411         spin_lock(&block_rsv->lock);
4412         block_rsv->reserved += num_bytes;
4413         if (update_size)
4414                 block_rsv->size += num_bytes;
4415         else if (block_rsv->reserved >= block_rsv->size)
4416                 block_rsv->full = 1;
4417         spin_unlock(&block_rsv->lock);
4418 }
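/*
 * Note the update_size distinction above: passing 1 grows both ->size
 * and ->reserved (a brand new reservation), while passing 0 only refills
 * ->reserved toward the existing ->size, which is what
 * btrfs_block_rsv_refill() below wants.
 */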
4419
4420 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4421                              struct btrfs_block_rsv *dest, u64 num_bytes,
4422                              int min_factor)
4423 {
4424         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4425         u64 min_bytes;
4426
4427         if (global_rsv->space_info != dest->space_info)
4428                 return -ENOSPC;
4429
4430         spin_lock(&global_rsv->lock);
4431         min_bytes = div_factor(global_rsv->size, min_factor);
4432         if (global_rsv->reserved < min_bytes + num_bytes) {
4433                 spin_unlock(&global_rsv->lock);
4434                 return -ENOSPC;
4435         }
4436         global_rsv->reserved -= num_bytes;
4437         if (global_rsv->reserved < global_rsv->size)
4438                 global_rsv->full = 0;
4439         spin_unlock(&global_rsv->lock);
4440
4441         block_rsv_add_bytes(dest, num_bytes, 1);
4442         return 0;
4443 }
4444
4445 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4446                                     struct btrfs_block_rsv *block_rsv,
4447                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4448 {
4449         struct btrfs_space_info *space_info = block_rsv->space_info;
4450
4451         spin_lock(&block_rsv->lock);
4452         if (num_bytes == (u64)-1)
4453                 num_bytes = block_rsv->size;
4454         block_rsv->size -= num_bytes;
4455         if (block_rsv->reserved >= block_rsv->size) {
4456                 num_bytes = block_rsv->reserved - block_rsv->size;
4457                 block_rsv->reserved = block_rsv->size;
4458                 block_rsv->full = 1;
4459         } else {
4460                 num_bytes = 0;
4461         }
4462         spin_unlock(&block_rsv->lock);
4463
4464         if (num_bytes > 0) {
4465                 if (dest) {
4466                         spin_lock(&dest->lock);
4467                         if (!dest->full) {
4468                                 u64 bytes_to_add;
4469
4470                                 bytes_to_add = dest->size - dest->reserved;
4471                                 bytes_to_add = min(num_bytes, bytes_to_add);
4472                                 dest->reserved += bytes_to_add;
4473                                 if (dest->reserved >= dest->size)
4474                                         dest->full = 1;
4475                                 num_bytes -= bytes_to_add;
4476                         }
4477                         spin_unlock(&dest->lock);
4478                 }
4479                 if (num_bytes) {
4480                         spin_lock(&space_info->lock);
4481                         space_info->bytes_may_use -= num_bytes;
4482                         trace_btrfs_space_reservation(fs_info, "space_info",
4483                                         space_info->flags, num_bytes, 0);
4484                         spin_unlock(&space_info->lock);
4485                 }
4486         }
4487 }
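/*
 * Any excess found by the release above first tops up @dest (the global
 * reserve, in the btrfs_block_rsv_release() path) and only the remainder
 * is handed back to the space_info's bytes_may_use.
 */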
4488
4489 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4490                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4491 {
4492         int ret;
4493
4494         ret = block_rsv_use_bytes(src, num_bytes);
4495         if (ret)
4496                 return ret;
4497
4498         block_rsv_add_bytes(dst, num_bytes, 1);
4499         return 0;
4500 }
4501
4502 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4503 {
4504         memset(rsv, 0, sizeof(*rsv));
4505         spin_lock_init(&rsv->lock);
4506         rsv->type = type;
4507 }
4508
4509 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4510                                               unsigned short type)
4511 {
4512         struct btrfs_block_rsv *block_rsv;
4513         struct btrfs_fs_info *fs_info = root->fs_info;
4514
4515         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4516         if (!block_rsv)
4517                 return NULL;
4518
4519         btrfs_init_block_rsv(block_rsv, type);
4520         block_rsv->space_info = __find_space_info(fs_info,
4521                                                   BTRFS_BLOCK_GROUP_METADATA);
4522         return block_rsv;
4523 }
4524
4525 void btrfs_free_block_rsv(struct btrfs_root *root,
4526                           struct btrfs_block_rsv *rsv)
4527 {
4528         if (!rsv)
4529                 return;
4530         btrfs_block_rsv_release(root, rsv, (u64)-1);
4531         kfree(rsv);
4532 }
4533
4534 int btrfs_block_rsv_add(struct btrfs_root *root,
4535                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4536                         enum btrfs_reserve_flush_enum flush)
4537 {
4538         int ret;
4539
4540         if (num_bytes == 0)
4541                 return 0;
4542
4543         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4544         if (!ret) {
4545                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4546                 return 0;
4547         }
4548
4549         return ret;
4550 }
4551
4552 int btrfs_block_rsv_check(struct btrfs_root *root,
4553                           struct btrfs_block_rsv *block_rsv, int min_factor)
4554 {
4555         u64 num_bytes = 0;
4556         int ret = -ENOSPC;
4557
4558         if (!block_rsv)
4559                 return 0;
4560
4561         spin_lock(&block_rsv->lock);
4562         num_bytes = div_factor(block_rsv->size, min_factor);
4563         if (block_rsv->reserved >= num_bytes)
4564                 ret = 0;
4565         spin_unlock(&block_rsv->lock);
4566
4567         return ret;
4568 }
4569
4570 int btrfs_block_rsv_refill(struct btrfs_root *root,
4571                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4572                            enum btrfs_reserve_flush_enum flush)
4573 {
4574         u64 num_bytes = 0;
4575         int ret = -ENOSPC;
4576
4577         if (!block_rsv)
4578                 return 0;
4579
4580         spin_lock(&block_rsv->lock);
4581         num_bytes = min_reserved;
4582         if (block_rsv->reserved >= num_bytes)
4583                 ret = 0;
4584         else
4585                 num_bytes -= block_rsv->reserved;
4586         spin_unlock(&block_rsv->lock);
4587
4588         if (!ret)
4589                 return 0;
4590
4591         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4592         if (!ret) {
4593                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4594                 return 0;
4595         }
4596
4597         return ret;
4598 }
4599
4600 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4601                             struct btrfs_block_rsv *dst_rsv,
4602                             u64 num_bytes)
4603 {
4604         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4605 }
4606
4607 void btrfs_block_rsv_release(struct btrfs_root *root,
4608                              struct btrfs_block_rsv *block_rsv,
4609                              u64 num_bytes)
4610 {
4611         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4612         if (global_rsv == block_rsv ||
4613             block_rsv->space_info != global_rsv->space_info)
4614                 global_rsv = NULL;
4615         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4616                                 num_bytes);
4617 }
4618
4619 /*
4620  * Helper to calculate the size of the global block reservation.
4621  * The desired value is the sum of space used by the extent tree,
4622  * checksum tree and root tree.
4623  */
4624 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4625 {
4626         struct btrfs_space_info *sinfo;
4627         u64 num_bytes;
4628         u64 meta_used;
4629         u64 data_used;
4630         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4631
4632         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4633         spin_lock(&sinfo->lock);
4634         data_used = sinfo->bytes_used;
4635         spin_unlock(&sinfo->lock);
4636
4637         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4638         spin_lock(&sinfo->lock);
4639         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4640                 data_used = 0;
4641         meta_used = sinfo->bytes_used;
4642         spin_unlock(&sinfo->lock);
4643
4644         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4645                     csum_size * 2;
4646         num_bytes += div64_u64(data_used + meta_used, 50);
4647
4648         if (num_bytes * 3 > meta_used)
4649                 num_bytes = div64_u64(meta_used, 3);
4650
4651         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4652 }
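/*
 * Worked example (assumed figures): with 100GiB of data, 4KiB blocks and
 * 4-byte crc32c csums, the checksum term above is
 * (100GiB / 4KiB) * 4 * 2 = 200MiB; 2% of the data plus metadata used is
 * added on top, and the result is clamped to a third of the metadata
 * space in use.
 */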
4653
4654 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4655 {
4656         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4657         struct btrfs_space_info *sinfo = block_rsv->space_info;
4658         u64 num_bytes;
4659
4660         num_bytes = calc_global_metadata_size(fs_info);
4661
4662         spin_lock(&sinfo->lock);
4663         spin_lock(&block_rsv->lock);
4664
4665         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4666
4667         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4668                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4669                     sinfo->bytes_may_use;
4670
4671         if (sinfo->total_bytes > num_bytes) {
4672                 num_bytes = sinfo->total_bytes - num_bytes;
4673                 block_rsv->reserved += num_bytes;
4674                 sinfo->bytes_may_use += num_bytes;
4675                 trace_btrfs_space_reservation(fs_info, "space_info",
4676                                       sinfo->flags, num_bytes, 1);
4677         }
4678
4679         if (block_rsv->reserved >= block_rsv->size) {
4680                 num_bytes = block_rsv->reserved - block_rsv->size;
4681                 sinfo->bytes_may_use -= num_bytes;
4682                 trace_btrfs_space_reservation(fs_info, "space_info",
4683                                       sinfo->flags, num_bytes, 0);
4684                 block_rsv->reserved = block_rsv->size;
4685                 block_rsv->full = 1;
4686         }
4687
4688         spin_unlock(&block_rsv->lock);
4689         spin_unlock(&sinfo->lock);
4690 }
4691
4692 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4693 {
4694         struct btrfs_space_info *space_info;
4695
4696         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4697         fs_info->chunk_block_rsv.space_info = space_info;
4698
4699         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4700         fs_info->global_block_rsv.space_info = space_info;
4701         fs_info->delalloc_block_rsv.space_info = space_info;
4702         fs_info->trans_block_rsv.space_info = space_info;
4703         fs_info->empty_block_rsv.space_info = space_info;
4704         fs_info->delayed_block_rsv.space_info = space_info;
4705
4706         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4707         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4708         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4709         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4710         if (fs_info->quota_root)
4711                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4712         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4713
4714         update_global_block_rsv(fs_info);
4715 }
4716
4717 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4718 {
4719         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4720                                 (u64)-1);
4721         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4722         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4723         WARN_ON(fs_info->trans_block_rsv.size > 0);
4724         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4725         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4726         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4727         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4728         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4729 }
4730
4731 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4732                                   struct btrfs_root *root)
4733 {
4734         if (!trans->block_rsv)
4735                 return;
4736
4737         if (!trans->bytes_reserved)
4738                 return;
4739
4740         trace_btrfs_space_reservation(root->fs_info, "transaction",
4741                                       trans->transid, trans->bytes_reserved, 0);
4742         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4743         trans->bytes_reserved = 0;
4744 }
4745
4746 /* Can only return 0 or -ENOSPC */
4747 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4748                                   struct inode *inode)
4749 {
4750         struct btrfs_root *root = BTRFS_I(inode)->root;
4751         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4752         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4753
4754         /*
4755          * We need to hold space in order to delete our orphan item once we've
4756          * added it, so this takes the reservation so we can release it later
4757          * when we are truly done with the orphan item.
4758          */
4759         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4760         trace_btrfs_space_reservation(root->fs_info, "orphan",
4761                                       btrfs_ino(inode), num_bytes, 1);
4762         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4763 }
4764
4765 void btrfs_orphan_release_metadata(struct inode *inode)
4766 {
4767         struct btrfs_root *root = BTRFS_I(inode)->root;
4768         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4769         trace_btrfs_space_reservation(root->fs_info, "orphan",
4770                                       btrfs_ino(inode), num_bytes, 0);
4771         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4772 }
4773
4774 /*
4775  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4776  * root: the root of the parent directory
4777  * rsv: block reservation
4778  * items: the number of items that we need to reserve for
4779  * qgroup_reserved: used to return the reserved size in qgroup
4780  *
4781  * This function is used to reserve the space for snapshot/subvolume
4782  * creation and deletion. Those operations differ from the common
4783  * file/directory operations: they change two fs/file trees and the
4784  * root tree, and the number of items that the qgroup reserves
4785  * differs from that of the free space reservation, so we cannot use
4786  * the space reservation mechanism in start_transaction().
4787  */
4788 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4789                                      struct btrfs_block_rsv *rsv,
4790                                      int items,
4791                                      u64 *qgroup_reserved,
4792                                      bool use_global_rsv)
4793 {
4794         u64 num_bytes;
4795         int ret;
4796         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4797
4798         if (root->fs_info->quota_enabled) {
4799                 /* One for parent inode, two for dir entries */
4800                 num_bytes = 3 * root->leafsize;
4801                 ret = btrfs_qgroup_reserve(root, num_bytes);
4802                 if (ret)
4803                         return ret;
4804         } else {
4805                 num_bytes = 0;
4806         }
4807
4808         *qgroup_reserved = num_bytes;
4809
4810         num_bytes = btrfs_calc_trans_metadata_size(root, items);
4811         rsv->space_info = __find_space_info(root->fs_info,
4812                                             BTRFS_BLOCK_GROUP_METADATA);
4813         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4814                                   BTRFS_RESERVE_FLUSH_ALL);
4815
4816         if (ret == -ENOSPC && use_global_rsv)
4817                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
4818
4819         if (ret) {
4820                 if (*qgroup_reserved)
4821                         btrfs_qgroup_free(root, *qgroup_reserved);
4822         }
4823
4824         return ret;
4825 }
4826
4827 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4828                                       struct btrfs_block_rsv *rsv,
4829                                       u64 qgroup_reserved)
4830 {
4831         btrfs_block_rsv_release(root, rsv, (u64)-1);
4832         if (qgroup_reserved)
4833                 btrfs_qgroup_free(root, qgroup_reserved);
4834 }
4835
4836 /**
4837  * drop_outstanding_extent - drop an outstanding extent
4838  * @inode: the inode we're dropping the extent for
4839  *
4840  * This is called when we are freeing up an outstanding extent, either
4841  * after an error or after an extent is written.  This will return the number of
4842  * reserved extents that need to be freed.  This must be called with
4843  * BTRFS_I(inode)->lock held.
4844  */
4845 static unsigned drop_outstanding_extent(struct inode *inode)
4846 {
4847         unsigned drop_inode_space = 0;
4848         unsigned dropped_extents = 0;
4849
4850         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4851         BTRFS_I(inode)->outstanding_extents--;
4852
4853         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4854             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4855                                &BTRFS_I(inode)->runtime_flags))
4856                 drop_inode_space = 1;
4857
4858         /*
4859          * If we have the same or more outstanding extents than we have
4860          * reserved then we need to leave the reserved extents count alone.
4861          */
4862         if (BTRFS_I(inode)->outstanding_extents >=
4863             BTRFS_I(inode)->reserved_extents)
4864                 return drop_inode_space;
4865
4866         dropped_extents = BTRFS_I(inode)->reserved_extents -
4867                 BTRFS_I(inode)->outstanding_extents;
4868         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4869         return dropped_extents + drop_inode_space;
4870 }
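/*
 * Example: if 3 extents were reserved but only 1 is still outstanding
 * after the decrement above, dropped_extents is 2; one more reservation
 * is returned on top of that when the outstanding count hits zero and
 * this drop also cleared BTRFS_INODE_DELALLOC_META_RESERVED.
 */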
4871
4872 /**
4873  * calc_csum_metadata_size - return the amount of metadata space that must be
4874  *      reserved/freed for the given bytes.
4875  * @inode: the inode we're manipulating
4876  * @num_bytes: the number of bytes in question
4877  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4878  *
4879  * This adjusts the number of csum_bytes in the inode and then returns the
4880  * correct amount of metadata that must either be reserved or freed.  We
4881  * calculate how many checksums we can fit into one leaf and then divide the
4882  * number of bytes that will need to be checksummed by this value to figure out
4883  * how many checksums will be required.  If we are adding bytes then the number
4884  * may go up and we will return the number of additional bytes that must be
4885  * reserved.  If it is going down we will return the number of bytes that must
4886  * be freed.
4887  *
4888  * This must be called with BTRFS_I(inode)->lock held.
4889  */
4890 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4891                                    int reserve)
4892 {
4893         struct btrfs_root *root = BTRFS_I(inode)->root;
4894         u64 csum_size;
4895         int num_csums_per_leaf;
4896         int num_csums;
4897         int old_csums;
4898
4899         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4900             BTRFS_I(inode)->csum_bytes == 0)
4901                 return 0;
4902
4903         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4904         if (reserve)
4905                 BTRFS_I(inode)->csum_bytes += num_bytes;
4906         else
4907                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4908         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4909         num_csums_per_leaf = (int)div64_u64(csum_size,
4910                                             sizeof(struct btrfs_csum_item) +
4911                                             sizeof(struct btrfs_disk_key));
4912         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4913         num_csums = num_csums + num_csums_per_leaf - 1;
4914         num_csums = num_csums / num_csums_per_leaf;
4915
4916         old_csums = old_csums + num_csums_per_leaf - 1;
4917         old_csums = old_csums / num_csums_per_leaf;
4918
4919         /* No change, no need to reserve more */
4920         if (old_csums == num_csums)
4921                 return 0;
4922
4923         if (reserve)
4924                 return btrfs_calc_trans_metadata_size(root,
4925                                                       num_csums - old_csums);
4926
4927         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4928 }
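/*
 * Illustrative example (figures assumed): with a 4KiB sectorsize and,
 * say, 200 csums fitting in one leaf, growing csum_bytes from 1MiB to
 * 2MiB goes from 256 csums (2 leaves) to 512 csums (3 leaves), so space
 * for one more item is reserved via btrfs_calc_trans_metadata_size().
 */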
4929
4930 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4931 {
4932         struct btrfs_root *root = BTRFS_I(inode)->root;
4933         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4934         u64 to_reserve = 0;
4935         u64 csum_bytes;
4936         unsigned nr_extents = 0;
4937         int extra_reserve = 0;
4938         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4939         int ret = 0;
4940         bool delalloc_lock = true;
4941         u64 to_free = 0;
4942         unsigned dropped;
4943
4944         /* If we are a free space inode we must not flush, since we will be in
4945          * the middle of a transaction commit.  We also don't need the delalloc
4946          * mutex since we won't race with anybody.  We need this mostly to make
4947          * lockdep shut its filthy mouth.
4948          */
4949         if (btrfs_is_free_space_inode(inode)) {
4950                 flush = BTRFS_RESERVE_NO_FLUSH;
4951                 delalloc_lock = false;
4952         }
4953
4954         if (flush != BTRFS_RESERVE_NO_FLUSH &&
4955             btrfs_transaction_in_commit(root->fs_info))
4956                 schedule_timeout(1);
4957
4958         if (delalloc_lock)
4959                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4960
4961         num_bytes = ALIGN(num_bytes, root->sectorsize);
4962
4963         spin_lock(&BTRFS_I(inode)->lock);
4964         BTRFS_I(inode)->outstanding_extents++;
4965
4966         if (BTRFS_I(inode)->outstanding_extents >
4967             BTRFS_I(inode)->reserved_extents)
4968                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4969                         BTRFS_I(inode)->reserved_extents;
4970
4971         /*
4972          * Add an item to reserve for updating the inode when we complete the
4973          * delalloc io.
4974          */
4975         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4976                       &BTRFS_I(inode)->runtime_flags)) {
4977                 nr_extents++;
4978                 extra_reserve = 1;
4979         }
4980
4981         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4982         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4983         csum_bytes = BTRFS_I(inode)->csum_bytes;
4984         spin_unlock(&BTRFS_I(inode)->lock);
4985
4986         if (root->fs_info->quota_enabled) {
4987                 ret = btrfs_qgroup_reserve(root, num_bytes +
4988                                            nr_extents * root->leafsize);
4989                 if (ret)
4990                         goto out_fail;
4991         }
4992
4993         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4994         if (unlikely(ret)) {
4995                 if (root->fs_info->quota_enabled)
4996                         btrfs_qgroup_free(root, num_bytes +
4997                                                 nr_extents * root->leafsize);
4998                 goto out_fail;
4999         }
5000
5001         spin_lock(&BTRFS_I(inode)->lock);
5002         if (extra_reserve) {
5003                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5004                         &BTRFS_I(inode)->runtime_flags);
5005                 nr_extents--;
5006         }
5007         BTRFS_I(inode)->reserved_extents += nr_extents;
5008         spin_unlock(&BTRFS_I(inode)->lock);
5009
5010         if (delalloc_lock)
5011                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5012
5013         if (to_reserve)
5014                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5015                                               btrfs_ino(inode), to_reserve, 1);
5016         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5017
5018         return 0;
5019
5020 out_fail:
5021         spin_lock(&BTRFS_I(inode)->lock);
5022         dropped = drop_outstanding_extent(inode);
5023         /*
5024          * If the inode's csum_bytes is the same as the original
5025          * csum_bytes then we know we haven't raced with any freers,
5026          * so we can just reduce our inode's csum bytes and carry on.
5027          */
5028         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5029                 calc_csum_metadata_size(inode, num_bytes, 0);
5030         } else {
5031                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5032                 u64 bytes;
5033
5034                 /*
5035                  * This is tricky, but first we need to figure out how much we
5036                  * freed from any freers that occurred during this
5037                  * reservation, so we reset ->csum_bytes to the csum_bytes
5038                  * before we dropped our lock, and then call the free for the
5039                  * number of bytes that were freed while we were trying our
5040                  * reservation.
5041                  */
5042                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5043                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5044                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5045
5047                 /*
5048                  * Now we need to see how much we would have freed had we not
5049                  * been making this reservation and our ->csum_bytes were not
5050                  * artificially inflated.
5051                  */
5052                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5053                 bytes = csum_bytes - orig_csum_bytes;
5054                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5055
5056                 /*
5057                  * Now reset ->csum_bytes to what it should be.  If bytes is
5058                  * more than to_free then we would have freed more space had we
5059                  * not had an artificially high ->csum_bytes, so we need to free
5060                  * the remainder.  If bytes is the same or less then we don't
5061                  * need to do anything; the other freers did the correct
5062                  * thing.
5063                  */
5064                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5065                 if (bytes > to_free)
5066                         to_free = bytes - to_free;
5067                 else
5068                         to_free = 0;
5069         }
5070         spin_unlock(&BTRFS_I(inode)->lock);
5071         if (dropped)
5072                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5073
5074         if (to_free) {
5075                 btrfs_block_rsv_release(root, block_rsv, to_free);
5076                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5077                                               btrfs_ino(inode), to_free, 0);
5078         }
5079         if (delalloc_lock)
5080                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5081         return ret;
5082 }
5083
5084 /**
5085  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5086  * @inode: the inode to release the reservation for
5087  * @num_bytes: the number of bytes we're releasing
5088  *
5089  * This will release the metadata reservation for an inode.  This can be called
5090  * once we complete IO for a given set of bytes to release their metadata
5091  * reservations.
5092  */
5093 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5094 {
5095         struct btrfs_root *root = BTRFS_I(inode)->root;
5096         u64 to_free = 0;
5097         unsigned dropped;
5098
5099         num_bytes = ALIGN(num_bytes, root->sectorsize);
5100         spin_lock(&BTRFS_I(inode)->lock);
5101         dropped = drop_outstanding_extent(inode);
5102
5103         if (num_bytes)
5104                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5105         spin_unlock(&BTRFS_I(inode)->lock);
5106         if (dropped > 0)
5107                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5108
5109         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5110                                       btrfs_ino(inode), to_free, 0);
5111         if (root->fs_info->quota_enabled) {
5112                 btrfs_qgroup_free(root, num_bytes +
5113                                         dropped * root->leafsize);
5114         }
5115
5116         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5117                                 to_free);
5118 }
5119
5120 /**
5121  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5122  * @inode: inode we're writing to
5123  * @num_bytes: the number of bytes we want to allocate
5124  *
5125  * This will do the following things
5126  *
5127  * o reserve space in the data space info for num_bytes
5128  * o reserve space in the metadata space info based on number of outstanding
5129  *   extents and how much csums will be needed
5130  * o add to the inodes ->delalloc_bytes
5131  * o add it to the fs_info's delalloc inodes list.
5132  *
5133  * This will return 0 for success and -ENOSPC if there is no space left.
5134  */
5135 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5136 {
5137         int ret;
5138
5139         ret = btrfs_check_data_free_space(inode, num_bytes);
5140         if (ret)
5141                 return ret;
5142
5143         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5144         if (ret) {
5145                 btrfs_free_reserved_data_space(inode, num_bytes);
5146                 return ret;
5147         }
5148
5149         return 0;
5150 }
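/*
 * Sketch of the intended calling pattern (hypothetical caller):
 *
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	...dirty the pages...
 *	(and on failure after this point:
 *	 btrfs_delalloc_release_space(inode, num_bytes);)
 */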
5151
5152 /**
5153  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5154  * @inode: inode we're releasing space for
5155  * @num_bytes: the number of bytes we want to free up
5156  *
5157  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5158  * called in the case that we don't need the metadata AND data reservations
5159  * anymore, e.g. if there is an error or we insert an inline extent.
5160  *
5161  * This function will release the metadata space that was not used and will
5162  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5163  * list if there are no delalloc bytes left.
5164  */
5165 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5166 {
5167         btrfs_delalloc_release_metadata(inode, num_bytes);
5168         btrfs_free_reserved_data_space(inode, num_bytes);
5169 }
5170
5171 static int update_block_group(struct btrfs_root *root,
5172                               u64 bytenr, u64 num_bytes, int alloc)
5173 {
5174         struct btrfs_block_group_cache *cache = NULL;
5175         struct btrfs_fs_info *info = root->fs_info;
5176         u64 total = num_bytes;
5177         u64 old_val;
5178         u64 byte_in_group;
5179         int factor;
5180
5181         /* block accounting for super block */
5182         spin_lock(&info->delalloc_root_lock);
5183         old_val = btrfs_super_bytes_used(info->super_copy);
5184         if (alloc)
5185                 old_val += num_bytes;
5186         else
5187                 old_val -= num_bytes;
5188         btrfs_set_super_bytes_used(info->super_copy, old_val);
5189         spin_unlock(&info->delalloc_root_lock);
5190
5191         while (total) {
5192                 cache = btrfs_lookup_block_group(info, bytenr);
5193                 if (!cache)
5194                         return -ENOENT;
5195                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5196                                     BTRFS_BLOCK_GROUP_RAID1 |
5197                                     BTRFS_BLOCK_GROUP_RAID10))
5198                         factor = 2;
5199                 else
5200                         factor = 1;
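                /*
                 * DUP, RAID1 and RAID10 keep two copies of every byte,
                 * so the disk_used updates below move by twice the
                 * logical number of bytes; single and RAID0 profiles
                 * move it one to one.
                 */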
5201                 /*
5202                  * If this block group has free space cache written out, we
5203                  * need to make sure to load it if we are removing space.  This
5204                  * is because we need the unpinning stage to actually add the
5205                  * space back to the block group, otherwise we will leak space.
5206                  */
5207                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5208                         cache_block_group(cache, 1);
5209
5210                 byte_in_group = bytenr - cache->key.objectid;
5211                 WARN_ON(byte_in_group > cache->key.offset);
5212
5213                 spin_lock(&cache->space_info->lock);
5214                 spin_lock(&cache->lock);
5215
5216                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5217                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5218                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5219
5220                 cache->dirty = 1;
5221                 old_val = btrfs_block_group_used(&cache->item);
5222                 num_bytes = min(total, cache->key.offset - byte_in_group);
5223                 if (alloc) {
5224                         old_val += num_bytes;
5225                         btrfs_set_block_group_used(&cache->item, old_val);
5226                         cache->reserved -= num_bytes;
5227                         cache->space_info->bytes_reserved -= num_bytes;
5228                         cache->space_info->bytes_used += num_bytes;
5229                         cache->space_info->disk_used += num_bytes * factor;
5230                         spin_unlock(&cache->lock);
5231                         spin_unlock(&cache->space_info->lock);
5232                 } else {
5233                         old_val -= num_bytes;
5234                         btrfs_set_block_group_used(&cache->item, old_val);
5235                         cache->pinned += num_bytes;
5236                         cache->space_info->bytes_pinned += num_bytes;
5237                         cache->space_info->bytes_used -= num_bytes;
5238                         cache->space_info->disk_used -= num_bytes * factor;
5239                         spin_unlock(&cache->lock);
5240                         spin_unlock(&cache->space_info->lock);
5241
5242                         set_extent_dirty(info->pinned_extents,
5243                                          bytenr, bytenr + num_bytes - 1,
5244                                          GFP_NOFS | __GFP_NOFAIL);
5245                 }
5246                 btrfs_put_block_group(cache);
5247                 total -= num_bytes;
5248                 bytenr += num_bytes;
5249         }
5250         return 0;
5251 }
5252
5253 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5254 {
5255         struct btrfs_block_group_cache *cache;
5256         u64 bytenr;
5257
5258         spin_lock(&root->fs_info->block_group_cache_lock);
5259         bytenr = root->fs_info->first_logical_byte;
5260         spin_unlock(&root->fs_info->block_group_cache_lock);
5261
5262         if (bytenr < (u64)-1)
5263                 return bytenr;
5264
5265         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5266         if (!cache)
5267                 return 0;
5268
5269         bytenr = cache->key.objectid;
5270         btrfs_put_block_group(cache);
5271
5272         return bytenr;
5273 }
5274
5275 static int pin_down_extent(struct btrfs_root *root,
5276                            struct btrfs_block_group_cache *cache,
5277                            u64 bytenr, u64 num_bytes, int reserved)
5278 {
5279         spin_lock(&cache->space_info->lock);
5280         spin_lock(&cache->lock);
5281         cache->pinned += num_bytes;
5282         cache->space_info->bytes_pinned += num_bytes;
5283         if (reserved) {
5284                 cache->reserved -= num_bytes;
5285                 cache->space_info->bytes_reserved -= num_bytes;
5286         }
5287         spin_unlock(&cache->lock);
5288         spin_unlock(&cache->space_info->lock);
5289
5290         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5291                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5292         if (reserved)
5293                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5294         return 0;
5295 }
5296
5297 /*
5298  * this function must be called within a transaction
5299  */
5300 int btrfs_pin_extent(struct btrfs_root *root,
5301                      u64 bytenr, u64 num_bytes, int reserved)
5302 {
5303         struct btrfs_block_group_cache *cache;
5304
5305         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5306         BUG_ON(!cache); /* Logic error */
5307
5308         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5309
5310         btrfs_put_block_group(cache);
5311         return 0;
5312 }
5313
5314 /*
5315  * this function must be called within a transaction
5316  */
5317 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5318                                     u64 bytenr, u64 num_bytes)
5319 {
5320         struct btrfs_block_group_cache *cache;
5321         int ret;
5322
5323         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5324         if (!cache)
5325                 return -EINVAL;
5326
5327         /*
5328          * pull in the free space cache (if any) so that our pin
5329          * removes the free space from the cache.  We have load_only set
5330                  * to one because the slow code that reads in the free extents does check
5331          * the pinned extents.
5332          */
5333         cache_block_group(cache, 1);
5334
5335         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5336
5337         /* remove us from the free space cache (if we're there at all) */
5338         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5339         btrfs_put_block_group(cache);
5340         return ret;
5341 }
5342
5343 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5344 {
5345         int ret;
5346         struct btrfs_block_group_cache *block_group;
5347         struct btrfs_caching_control *caching_ctl;
5348
5349         block_group = btrfs_lookup_block_group(root->fs_info, start);
5350         if (!block_group)
5351                 return -EINVAL;
5352
5353         cache_block_group(block_group, 0);
5354         caching_ctl = get_caching_control(block_group);
5355
5356         if (!caching_ctl) {
5357                 /* Logic error */
5358                 BUG_ON(!block_group_cache_done(block_group));
5359                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5360         } else {
5361                 mutex_lock(&caching_ctl->mutex);
5362
5363                 if (start >= caching_ctl->progress) {
5364                         ret = add_excluded_extent(root, start, num_bytes);
5365                 } else if (start + num_bytes <= caching_ctl->progress) {
5366                         ret = btrfs_remove_free_space(block_group,
5367                                                       start, num_bytes);
5368                 } else {
5369                         num_bytes = caching_ctl->progress - start;
5370                         ret = btrfs_remove_free_space(block_group,
5371                                                       start, num_bytes);
5372                         if (ret)
5373                                 goto out_lock;
5374
5375                         num_bytes = (start + num_bytes) -
5376                                 caching_ctl->progress;
5377                         start = caching_ctl->progress;
5378                         ret = add_excluded_extent(root, start, num_bytes);
5379                 }
5380 out_lock:
5381                 mutex_unlock(&caching_ctl->mutex);
5382                 put_caching_control(caching_ctl);
5383         }
5384         btrfs_put_block_group(block_group);
5385         return ret;
5386 }
5387
5388 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5389                                  struct extent_buffer *eb)
5390 {
5391         struct btrfs_file_extent_item *item;
5392         struct btrfs_key key;
5393         int found_type;
5394         int i;
5395
5396         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5397                 return 0;
5398
5399         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5400                 btrfs_item_key_to_cpu(eb, &key, i);
5401                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5402                         continue;
5403                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5404                 found_type = btrfs_file_extent_type(eb, item);
5405                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5406                         continue;
5407                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5408                         continue;
5409                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5410                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5411                 __exclude_logged_extent(log, key.objectid, key.offset);
5412         }
5413
5414         return 0;
5415 }
5416
5417 /**
5418  * btrfs_update_reserved_bytes - update the block_group and space info counters
5419  * @cache:      The cache we are manipulating
5420  * @num_bytes:  The number of bytes in question
5421  * @reserve:    One of the reservation enums
5422  *
5423  * This is called by the allocator when it reserves space, or by somebody who is
5424  * freeing space that was never actually used on disk.  For example if you
5425  * reserve some space for a new leaf in transaction A and before transaction A
5426  * commits you free that leaf, you call this with reserve set to RESERVE_FREE
5427  * in order to clear the reservation.
5428  *
5429  * Metadata reservations should be made with RESERVE_ALLOC so we do the proper
5430  * ENOSPC accounting.  For data we handle the reservation through clearing the
5431  * delalloc bits in the io_tree.  We have to do this since we could end up
5432  * allocating less disk space for the amount of data we have reserved in the
5433  * case of compression.
5434  *
5435  * If this is a reservation and the block group has become read only we cannot
5436  * make the reservation and return -EAGAIN, otherwise this function always
5437  * succeeds.
5438  */
5439 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5440                                        u64 num_bytes, int reserve)
5441 {
5442         struct btrfs_space_info *space_info = cache->space_info;
5443         int ret = 0;
5444
5445         spin_lock(&space_info->lock);
5446         spin_lock(&cache->lock);
5447         if (reserve != RESERVE_FREE) {
5448                 if (cache->ro) {
5449                         ret = -EAGAIN;
5450                 } else {
5451                         cache->reserved += num_bytes;
5452                         space_info->bytes_reserved += num_bytes;
5453                         if (reserve == RESERVE_ALLOC) {
5454                                 trace_btrfs_space_reservation(cache->fs_info,
5455                                                 "space_info", space_info->flags,
5456                                                 num_bytes, 0);
5457                                 space_info->bytes_may_use -= num_bytes;
5458                         }
5459                 }
5460         } else {
5461                 if (cache->ro)
5462                         space_info->bytes_readonly += num_bytes;
5463                 cache->reserved -= num_bytes;
5464                 space_info->bytes_reserved -= num_bytes;
5465         }
5466         spin_unlock(&cache->lock);
5467         spin_unlock(&space_info->lock);
5468         return ret;
5469 }
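/*
 * Editor's illustration (not part of the original source): a sketch of the
 * expected pairing.  Every successful RESERVE_ALLOC (or
 * RESERVE_ALLOC_NO_ACCOUNT) call must eventually be balanced by a
 * RESERVE_FREE call if the space never reaches disk.  The surrounding
 * context here is hypothetical.
 */
#if 0
	ret = btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
	if (ret == -EAGAIN) {
		/* the block group went read-only; pick another group */
	}

	/* later, if the allocation is abandoned before it is written: */
	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
#endif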
5470
5471 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5472                                 struct btrfs_root *root)
5473 {
5474         struct btrfs_fs_info *fs_info = root->fs_info;
5475         struct btrfs_caching_control *next;
5476         struct btrfs_caching_control *caching_ctl;
5477         struct btrfs_block_group_cache *cache;
5478         struct btrfs_space_info *space_info;
5479
5480         down_write(&fs_info->extent_commit_sem);
5481
5482         list_for_each_entry_safe(caching_ctl, next,
5483                                  &fs_info->caching_block_groups, list) {
5484                 cache = caching_ctl->block_group;
5485                 if (block_group_cache_done(cache)) {
5486                         cache->last_byte_to_unpin = (u64)-1;
5487                         list_del_init(&caching_ctl->list);
5488                         put_caching_control(caching_ctl);
5489                 } else {
5490                         cache->last_byte_to_unpin = caching_ctl->progress;
5491                 }
5492         }
5493
5494         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5495                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5496         else
5497                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5498
5499         up_write(&fs_info->extent_commit_sem);
5500
5501         list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
5502                 percpu_counter_set(&space_info->total_bytes_pinned, 0);
5503
5504         update_global_block_rsv(fs_info);
5505 }
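/*
 * Editor's note (not part of the original source): the freed_extents[] flip
 * above acts as a double buffer.  Pins made during the next transaction
 * land in the newly selected tree, while btrfs_finish_extent_commit()
 * (below) drains the tree that was active before the flip, so unpinning
 * never races with freshly added pins.
 */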
5506
5507 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5508 {
5509         struct btrfs_fs_info *fs_info = root->fs_info;
5510         struct btrfs_block_group_cache *cache = NULL;
5511         struct btrfs_space_info *space_info;
5512         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5513         u64 len;
5514         bool readonly;
5515
5516         while (start <= end) {
5517                 readonly = false;
5518                 if (!cache ||
5519                     start >= cache->key.objectid + cache->key.offset) {
5520                         if (cache)
5521                                 btrfs_put_block_group(cache);
5522                         cache = btrfs_lookup_block_group(fs_info, start);
5523                         BUG_ON(!cache); /* Logic error */
5524                 }
5525
5526                 len = cache->key.objectid + cache->key.offset - start;
5527                 len = min(len, end + 1 - start);
5528
5529                 if (start < cache->last_byte_to_unpin) {
5530                         len = min(len, cache->last_byte_to_unpin - start);
5531                         btrfs_add_free_space(cache, start, len);
5532                 }
5533
5534                 start += len;
5535                 space_info = cache->space_info;
5536
5537                 spin_lock(&space_info->lock);
5538                 spin_lock(&cache->lock);
5539                 cache->pinned -= len;
5540                 space_info->bytes_pinned -= len;
5541                 if (cache->ro) {
5542                         space_info->bytes_readonly += len;
5543                         readonly = true;
5544                 }
5545                 spin_unlock(&cache->lock);
5546                 if (!readonly && global_rsv->space_info == space_info) {
5547                         spin_lock(&global_rsv->lock);
5548                         if (!global_rsv->full) {
5549                                 len = min(len, global_rsv->size -
5550                                           global_rsv->reserved);
5551                                 global_rsv->reserved += len;
5552                                 space_info->bytes_may_use += len;
5553                                 if (global_rsv->reserved >= global_rsv->size)
5554                                         global_rsv->full = 1;
5555                         }
5556                         spin_unlock(&global_rsv->lock);
5557                 }
5558                 spin_unlock(&space_info->lock);
5559         }
5560
5561         if (cache)
5562                 btrfs_put_block_group(cache);
5563         return 0;
5564 }
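/*
 * Editor's note (not part of the original source): a worked example of the
 * global reserve top-up above.  With global_rsv->size = 16M and
 * global_rsv->reserved = 15M, unpinning a 4M range hands
 * min(4M, 16M - 15M) = 1M back to the reserve (marking it full) and
 * re-accounts that 1M as bytes_may_use; the remaining 3M simply stops
 * being counted as pinned.
 */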
5565
5566 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5567                                struct btrfs_root *root)
5568 {
5569         struct btrfs_fs_info *fs_info = root->fs_info;
5570         struct extent_io_tree *unpin;
5571         u64 start;
5572         u64 end;
5573         int ret;
5574
5575         if (trans->aborted)
5576                 return 0;
5577
5578         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5579                 unpin = &fs_info->freed_extents[1];
5580         else
5581                 unpin = &fs_info->freed_extents[0];
5582
5583         while (1) {
5584                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5585                                             EXTENT_DIRTY, NULL);
5586                 if (ret)
5587                         break;
5588
5589                 if (btrfs_test_opt(root, DISCARD))
5590                         ret = btrfs_discard_extent(root, start,
5591                                                    end + 1 - start, NULL);
5592
5593                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5594                 unpin_extent_range(root, start, end);
5595                 cond_resched();
5596         }
5597
5598         return 0;
5599 }
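/*
 * Editor's note (not part of the original source): each pass of the loop
 * above handles one contiguous EXTENT_DIRTY range: the range is optionally
 * discarded (mount -o discard), its dirty bits are cleared, and
 * unpin_extent_range() returns it to the free space and reservation
 * accounting.  The return value of btrfs_discard_extent() is not checked,
 * so a failed discard does not abort the commit.
 */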
5600
5601 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5602                              u64 owner, u64 root_objectid)
5603 {
5604         struct btrfs_space_info *space_info;
5605         u64 flags;
5606
5607         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5608                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5609                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
5610                 else
5611                         flags = BTRFS_BLOCK_GROUP_METADATA;
5612         } else {
5613                 flags = BTRFS_BLOCK_GROUP_DATA;
5614         }
5615
5616         space_info = __find_space_info(fs_info, flags);
5617         BUG_ON(!space_info); /* Logic error */
5618         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5619 }
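/*
 * Editor's note (not part of the original source): an example of the flag
 * mapping above.  A freed tree block passes its level as owner (always
 * below BTRFS_FIRST_FREE_OBJECTID), so it counts against SYSTEM if it
 * belonged to the chunk tree and against METADATA otherwise; a file data
 * extent passes the inode number (>= BTRFS_FIRST_FREE_OBJECTID) as owner
 * and counts against DATA.
 */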
5620
5621
5622 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5623                                 struct btrfs_root *root,
5624                                 u64 bytenr, u64 num_bytes, u64 parent,
5625                                 u64 root_objectid, u64 owner_objectid,
5626                                 u64 owner_offset, int refs_to_drop,
5627                                 struct btrfs_delayed_extent_op *extent_op)
5628 {
5629         struct btrfs_key key;
5630         struct btrfs_path *path;
5631         struct btrfs_fs_info *info = root->fs_info;
5632         struct btrfs_root *extent_root = info->extent_root;
5633         struct extent_buffer *leaf;
5634         struct btrfs_extent_item *ei;
5635         struct btrfs_extent_inline_ref *iref;
5636         int ret;
5637         int is_data;
5638         int extent_slot = 0;
5639         int found_extent = 0;
5640         int num_to_del = 1;
5641         u32 item_size;
5642         u64 refs;
5643         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5644                                                  SKINNY_METADATA);
5645
5646         path = btrfs_alloc_path();
5647         if (!path)
5648                 return -ENOMEM;
5649
5650         path->reada = 1;
5651         path->leave_spinning = 1;
5652
5653         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5654         BUG_ON(!is_data && refs_to_drop != 1);
5655
5656         if (is_data)
5657                 skinny_metadata = 0;
5658
5659         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5660                                     bytenr, num_bytes, parent,
5661                                     root_objectid, owner_objectid,
5662                                     owner_offset);
5663         if (ret == 0) {
5664                 extent_slot = path->slots[0];
5665                 while (extent_slot >= 0) {
5666                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5667                                               extent_slot);
5668                         if (key.objectid != bytenr)
5669                                 break;
5670                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5671                             key.offset == num_bytes) {
5672                                 found_extent = 1;
5673                                 break;
5674                         }
5675                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5676                             key.offset == owner_objectid) {
5677                                 found_extent = 1;
5678                                 break;
5679                         }
5680                         if (path->slots[0] - extent_slot > 5)
5681                                 break;
5682                         extent_slot--;
5683                 }
5684 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5685                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5686                 if (found_extent && item_size < sizeof(*ei))
5687                         found_extent = 0;
5688 #endif
5689                 if (!found_extent) {
5690                         BUG_ON(iref);
5691                         ret = remove_extent_backref(trans, extent_root, path,
5692                                                     NULL, refs_to_drop,
5693                                                     is_data);
5694                         if (ret) {
5695                                 btrfs_abort_transaction(trans, extent_root, ret);
5696                                 goto out;
5697                         }
5698                         btrfs_release_path(path);
5699                         path->leave_spinning = 1;
5700
5701                         key.objectid = bytenr;
5702                         key.type = BTRFS_EXTENT_ITEM_KEY;
5703                         key.offset = num_bytes;
5704
5705                         if (!is_data && skinny_metadata) {
5706                                 key.type = BTRFS_METADATA_ITEM_KEY;
5707                                 key.offset = owner_objectid;
5708                         }
5709
5710                         ret = btrfs_search_slot(trans, extent_root,
5711                                                 &key, path, -1, 1);
5712                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5713                                 /*
5714                                  * Couldn't find our skinny metadata item,
5715                                  * see if we have ye olde extent item.
5716                                  */
5717                                 path->slots[0]--;
5718                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5719                                                       path->slots[0]);
5720                                 if (key.objectid == bytenr &&
5721                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5722                                     key.offset == num_bytes)
5723                                         ret = 0;
5724                         }
5725
5726                         if (ret > 0 && skinny_metadata) {
5727                                 skinny_metadata = false;
5728                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5729                                 key.offset = num_bytes;
5730                                 btrfs_release_path(path);
5731                                 ret = btrfs_search_slot(trans, extent_root,
5732                                                         &key, path, -1, 1);
5733                         }
5734
5735                         if (ret) {
5736                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5737                                         ret, bytenr);
5738                                 if (ret > 0)
5739                                         btrfs_print_leaf(extent_root,
5740                                                          path->nodes[0]);
5741                         }
5742                         if (ret < 0) {
5743                                 btrfs_abort_transaction(trans, extent_root, ret);
5744                                 goto out;
5745                         }
5746                         extent_slot = path->slots[0];
5747                 }
5748         } else if (WARN_ON(ret == -ENOENT)) {
5749                 btrfs_print_leaf(extent_root, path->nodes[0]);
5750                 btrfs_err(info,
5751                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
5752                         bytenr, parent, root_objectid, owner_objectid,
5753                         owner_offset);
5754         } else {
5755                 btrfs_abort_transaction(trans, extent_root, ret);
5756                 goto out;
5757         }
5758
5759         leaf = path->nodes[0];
5760         item_size = btrfs_item_size_nr(leaf, extent_slot);
5761 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5762         if (item_size < sizeof(*ei)) {
5763                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5764                 ret = convert_extent_item_v0(trans, extent_root, path,
5765                                              owner_objectid, 0);
5766                 if (ret < 0) {
5767                         btrfs_abort_transaction(trans, extent_root, ret);
5768                         goto out;
5769                 }
5770
5771                 btrfs_release_path(path);
5772                 path->leave_spinning = 1;
5773
5774                 key.objectid = bytenr;
5775                 key.type = BTRFS_EXTENT_ITEM_KEY;
5776                 key.offset = num_bytes;
5777
5778                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5779                                         -1, 1);
5780                 if (ret) {
5781                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5782                                 ret, bytenr);
5783                         btrfs_print_leaf(extent_root, path->nodes[0]);
5784                 }
5785                 if (ret < 0) {
5786                         btrfs_abort_transaction(trans, extent_root, ret);
5787                         goto out;
5788                 }
5789
5790                 extent_slot = path->slots[0];
5791                 leaf = path->nodes[0];
5792                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5793         }
5794 #endif
5795         BUG_ON(item_size < sizeof(*ei));
5796         ei = btrfs_item_ptr(leaf, extent_slot,
5797                             struct btrfs_extent_item);
5798         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
5799             key.type == BTRFS_EXTENT_ITEM_KEY) {
5800                 struct btrfs_tree_block_info *bi;
5801                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5802                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5803                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5804         }
5805
5806         refs = btrfs_extent_refs(leaf, ei);
5807         if (refs < refs_to_drop) {
5808                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
5809                           "for bytenr %Lu\n", refs_to_drop, refs, bytenr);
5810                 ret = -EINVAL;
5811                 btrfs_abort_transaction(trans, extent_root, ret);
5812                 goto out;
5813         }
5814         refs -= refs_to_drop;
5815
5816         if (refs > 0) {
5817                 if (extent_op)
5818                         __run_delayed_extent_op(extent_op, leaf, ei);
5819                 /*
5820                  * In the case of inline back ref, reference count will
5821                  * be updated by remove_extent_backref
5822                  */
5823                 if (iref) {
5824                         BUG_ON(!found_extent);
5825                 } else {
5826                         btrfs_set_extent_refs(leaf, ei, refs);
5827                         btrfs_mark_buffer_dirty(leaf);
5828                 }
5829                 if (found_extent) {
5830                         ret = remove_extent_backref(trans, extent_root, path,
5831                                                     iref, refs_to_drop,
5832                                                     is_data);
5833                         if (ret) {
5834                                 btrfs_abort_transaction(trans, extent_root, ret);
5835                                 goto out;
5836                         }
5837                 }
5838                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
5839                                  root_objectid);
5840         } else {
5841                 if (found_extent) {
5842                         BUG_ON(is_data && refs_to_drop !=
5843                                extent_data_ref_count(root, path, iref));
5844                         if (iref) {
5845                                 BUG_ON(path->slots[0] != extent_slot);
5846                         } else {
5847                                 BUG_ON(path->slots[0] != extent_slot + 1);
5848                                 path->slots[0] = extent_slot;
5849                                 num_to_del = 2;
5850                         }
5851                 }
5852
5853                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5854                                       num_to_del);
5855                 if (ret) {
5856                         btrfs_abort_transaction(trans, extent_root, ret);
5857                         goto out;
5858                 }
5859                 btrfs_release_path(path);
5860
5861                 if (is_data) {
5862                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5863                         if (ret) {
5864                                 btrfs_abort_transaction(trans, extent_root, ret);
5865                                 goto out;
5866                         }
5867                 }
5868
5869                 ret = update_block_group(root, bytenr, num_bytes, 0);
5870                 if (ret) {
5871                         btrfs_abort_transaction(trans, extent_root, ret);
5872                         goto out;
5873                 }
5874         }
5875 out:
5876         btrfs_free_path(path);
5877         return ret;
5878 }
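/*
 * Editor's note (not part of the original source): summarizing the two
 * outcomes above.  If references remain after the drop, only the ref count
 * (and possibly one inline or keyed backref) is updated in place; if this
 * was the last reference, the extent item itself (plus an adjacent backref
 * item when num_to_del == 2) is deleted, data checksums are removed, and
 * the block group usage is updated.
 */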
5879
5880 /*
5881  * When we free a block, it is possible (and likely) that we free the last
5882  * delayed ref for that extent as well.  This searches the delayed ref tree for
5883  * a given extent, and if there are no other delayed refs to be processed, it
5884  * removes the ref head from the tree.
5885  */
5886 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5887                                       struct btrfs_root *root, u64 bytenr)
5888 {
5889         struct btrfs_delayed_ref_head *head;
5890         struct btrfs_delayed_ref_root *delayed_refs;
5891         int ret = 0;
5892
5893         delayed_refs = &trans->transaction->delayed_refs;
5894         spin_lock(&delayed_refs->lock);
5895         head = btrfs_find_delayed_ref_head(trans, bytenr);
5896         if (!head)
5897                 goto out_delayed_unlock;
5898
5899         spin_lock(&head->lock);
5900         if (rb_first(&head->ref_root))
5901                 goto out;
5902
5903         if (head->extent_op) {
5904                 if (!head->must_insert_reserved)
5905                         goto out;
5906                 btrfs_free_delayed_extent_op(head->extent_op);
5907                 head->extent_op = NULL;
5908         }
5909
5910         /*
5911          * Waiting for the lock here would deadlock.  If someone else has it
5912          * locked, they are already in the process of dropping it anyway.
5913          */
5914         if (!mutex_trylock(&head->mutex))
5915                 goto out;
5916
5917         /*
5918          * at this point we have a head with no other entries.  Go
5919          * ahead and process it.
5920          */
5921         head->node.in_tree = 0;
5922         rb_erase(&head->href_node, &delayed_refs->href_root);
5923
5924         atomic_dec(&delayed_refs->num_entries);
5925
5926         /*
5927          * we don't take a ref on the node because we're removing it from the
5928          * tree, so we just steal the ref the tree was holding.
5929          */
5930         delayed_refs->num_heads--;
5931         if (head->processing == 0)
5932                 delayed_refs->num_heads_ready--;
5933         head->processing = 0;
5934         spin_unlock(&head->lock);
5935         spin_unlock(&delayed_refs->lock);
5936
5937         BUG_ON(head->extent_op);
5938         if (head->must_insert_reserved)
5939                 ret = 1;
5940
5941         mutex_unlock(&head->mutex);
5942         btrfs_put_delayed_ref(&head->node);
5943         return ret;
5944 out:
5945         spin_unlock(&head->lock);
5946
5947 out_delayed_unlock:
5948         spin_unlock(&delayed_refs->lock);
5949         return 0;
5950 }
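/*
 * Editor's note (not part of the original source): check_ref_cleanup()
 * returns 1 only when it removed the last delayed ref head for a block
 * whose extent item was never inserted (must_insert_reserved), meaning the
 * caller may reclaim the space directly; in every other case it returns 0
 * and the extent is left to normal delayed-ref processing.
 */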
5951
5952 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5953                            struct btrfs_root *root,
5954                            struct extent_buffer *buf,
5955                            u64 parent, int last_ref)
5956 {
5957         struct btrfs_block_group_cache *cache = NULL;
5958         int pin = 1;
5959         int ret;
5960
5961         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5962                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5963                                         buf->start, buf->len,
5964                                         parent, root->root_key.objectid,
5965                                         btrfs_header_level(buf),
5966                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
5967                 BUG_ON(ret); /* -ENOMEM */
5968         }
5969
5970         if (!last_ref)
5971                 return;
5972
5973         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5974
5975         if (btrfs_header_generation(buf) == trans->transid) {
5976                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5977                         ret = check_ref_cleanup(trans, root, buf->start);
5978                         if (!ret)
5979                                 goto out;
5980                 }
5981
5982                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5983                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5984                         goto out;
5985                 }
5986
5987                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5988
5989                 btrfs_add_free_space(cache, buf->start, buf->len);
5990                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5991                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
5992                 pin = 0;
5993         }
5994 out:
5995         if (pin)
5996                 add_pinned_bytes(root->fs_info, buf->len,
5997                                  btrfs_header_level(buf),
5998                                  root->root_key.objectid);
5999
6000         /*
6001          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6002          * anymore.
6003          */
6004         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6005         btrfs_put_block_group(cache);
6006 }
6007
6008 /* Can return -ENOMEM */
6009 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6010                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6011                       u64 owner, u64 offset, int for_cow)
6012 {
6013         int ret;
6014         struct btrfs_fs_info *fs_info = root->fs_info;
6015
6016         add_pinned_bytes(fs_info, num_bytes, owner, root_objectid);
6017
6018         /*
6019          * tree log blocks never actually go into the extent allocation
6020          * tree, just update pinning info and exit early.
6021          */
6022         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6023                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6024                 /* unlocks the pinned mutex */
6025                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6026                 ret = 0;
6027         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6028                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6029                                         num_bytes,
6030                                         parent, root_objectid, (int)owner,
6031                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
6032         } else {
6033                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6034                                                 num_bytes,
6035                                                 parent, root_objectid, owner,
6036                                                 offset, BTRFS_DROP_DELAYED_REF,
6037                                                 NULL, for_cow);
6038         }
6039         return ret;
6040 }
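/*
 * Editor's illustration (not part of the original source): a hypothetical
 * data-extent drop exercising the third branch above, releasing one
 * reference held by inode 257 at file offset 0 in the FS tree.  All values
 * are made up.
 */
#if 0
	ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
				0,                      /* parent: not a shared ref */
				BTRFS_FS_TREE_OBJECTID, /* root holding the ref */
				257,                    /* owner: inode number */
				0,                      /* offset within the file */
				0);                     /* for_cow */
#endif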
6041
6042 static u64 stripe_align(struct btrfs_root *root,
6043                         struct btrfs_block_group_cache *cache,
6044                         u64 val, u64 num_bytes)
6045 {
6046         u64 ret = ALIGN(val, root->stripesize);
6047         return ret;
6048 }
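/*
 * Editor's note (not part of the original source): stripe_align() simply
 * rounds up to the stripe size.  With root->stripesize = 4096, a candidate
 * start of 12345 becomes ALIGN(12345, 4096) = 16384.  The cache and
 * num_bytes arguments are currently unused.
 */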
6049
6050 /*
6051  * When we wait for progress in the block group caching, it's because
6052  * our allocation attempt failed at least once.  So, we must sleep
6053  * and let some progress happen before we try again.
6054  *
6055  * This function will sleep at least once waiting for new free space to
6056  * show up, and then it will check the block group free space numbers
6057  * for our min num_bytes.  Another option is to have it go ahead
6058  * and look in the rbtree for a free extent of a given size, but this
6059  * is a good start.
6060  *
6061  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6062  * any of the information in this block group.
6063  */
6064 static noinline void
6065 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6066                                 u64 num_bytes)
6067 {
6068         struct btrfs_caching_control *caching_ctl;
6069
6070         caching_ctl = get_caching_control(cache);
6071         if (!caching_ctl)
6072                 return;
6073
6074         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6075                    (cache->free_space_ctl->free_space >= num_bytes));
6076
6077         put_caching_control(caching_ctl);
6078 }
6079
6080 static noinline int
6081 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6082 {
6083         struct btrfs_caching_control *caching_ctl;
6084         int ret = 0;
6085
6086         caching_ctl = get_caching_control(cache);
6087         if (!caching_ctl)
6088                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6089
6090         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6091         if (cache->cached == BTRFS_CACHE_ERROR)
6092                 ret = -EIO;
6093         put_caching_control(caching_ctl);
6094         return ret;
6095 }
6096
6097 int __get_raid_index(u64 flags)
6098 {
6099         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6100                 return BTRFS_RAID_RAID10;
6101         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6102                 return BTRFS_RAID_RAID1;
6103         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6104                 return BTRFS_RAID_DUP;
6105         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6106                 return BTRFS_RAID_RAID0;
6107         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6108                 return BTRFS_RAID_RAID5;
6109         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6110                 return BTRFS_RAID_RAID6;
6111
6112         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6113 }
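/*
 * Editor's note (not part of the original source): for example,
 * __get_raid_index(BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1)
 * returns BTRFS_RAID_RAID1, since only the profile bits are tested; flags
 * with no profile bit set fall through to BTRFS_RAID_SINGLE.
 */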
6114
6115 int get_block_group_index(struct btrfs_block_group_cache *cache)
6116 {
6117         return __get_raid_index(cache->flags);
6118 }
6119
6120 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6121         [BTRFS_RAID_RAID10]     = "raid10",
6122         [BTRFS_RAID_RAID1]      = "raid1",
6123         [BTRFS_RAID_DUP]        = "dup",
6124         [BTRFS_RAID_RAID0]      = "raid0",
6125         [BTRFS_RAID_SINGLE]     = "single",
6126         [BTRFS_RAID_RAID5]      = "raid5",
6127         [BTRFS_RAID_RAID6]      = "raid6",
6128 };
6129
6130 static const char *get_raid_name(enum btrfs_raid_types type)
6131 {
6132         if (type >= BTRFS_NR_RAID_TYPES)
6133                 return NULL;
6134
6135         return btrfs_raid_type_names[type];
6136 }
6137
6138 enum btrfs_loop_type {
6139         LOOP_CACHING_NOWAIT = 0,
6140         LOOP_CACHING_WAIT = 1,
6141         LOOP_ALLOC_CHUNK = 2,
6142         LOOP_NO_EMPTY_SIZE = 3,
6143 };
6144
6145 /*
6146  * Walks the btree of allocated extents and finds a hole of a given size.
6147  * The key ins is changed to record the hole:
6148  * ins->objectid == start position
6149  * ins->type == BTRFS_EXTENT_ITEM_KEY
6150  * ins->offset == the size of the hole.
6151  * Any available blocks before search_start are skipped.
6152  *
6153  * If there is no suitable free space, we record the maximum size of the
6154  * largest free space extent we encountered instead.
6155  */
6156 static noinline int find_free_extent(struct btrfs_root *orig_root,
6157                                      u64 num_bytes, u64 empty_size,
6158                                      u64 hint_byte, struct btrfs_key *ins,
6159                                      u64 flags)
6160 {
6161         int ret = 0;
6162         struct btrfs_root *root = orig_root->fs_info->extent_root;
6163         struct btrfs_free_cluster *last_ptr = NULL;
6164         struct btrfs_block_group_cache *block_group = NULL;
6165         u64 search_start = 0;
6166         u64 max_extent_size = 0;
6167         int empty_cluster = 2 * 1024 * 1024;
6168         struct btrfs_space_info *space_info;
6169         int loop = 0;
6170         int index = __get_raid_index(flags);
6171         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6172                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6173         bool failed_cluster_refill = false;
6174         bool failed_alloc = false;
6175         bool use_cluster = true;
6176         bool have_caching_bg = false;
6177
6178         WARN_ON(num_bytes < root->sectorsize);
6179         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
6180         ins->objectid = 0;
6181         ins->offset = 0;
6182
6183         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6184
6185         space_info = __find_space_info(root->fs_info, flags);
6186         if (!space_info) {
6187                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6188                 return -ENOSPC;
6189         }
6190
6191         /*
6192          * If the space info is for both data and metadata it means we have a
6193          * small filesystem and we can't use the clustering stuff.
6194          */
6195         if (btrfs_mixed_space_info(space_info))
6196                 use_cluster = false;
6197
6198         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6199                 last_ptr = &root->fs_info->meta_alloc_cluster;
6200                 if (!btrfs_test_opt(root, SSD))
6201                         empty_cluster = 64 * 1024;
6202         }
6203
6204         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6205             btrfs_test_opt(root, SSD)) {
6206                 last_ptr = &root->fs_info->data_alloc_cluster;
6207         }
6208
6209         if (last_ptr) {
6210                 spin_lock(&last_ptr->lock);
6211                 if (last_ptr->block_group)
6212                         hint_byte = last_ptr->window_start;
6213                 spin_unlock(&last_ptr->lock);
6214         }
6215
6216         search_start = max(search_start, first_logical_byte(root, 0));
6217         search_start = max(search_start, hint_byte);
6218
6219         if (!last_ptr)
6220                 empty_cluster = 0;
6221
6222         if (search_start == hint_byte) {
6223                 block_group = btrfs_lookup_block_group(root->fs_info,
6224                                                        search_start);
6225                 /*
6226                  * we don't want to use the block group if it doesn't match our
6227                  * allocation bits, or if it's not cached.
6228                  *
6229                  * However if we are re-searching with an ideal block group
6230                  * picked out then we don't care that the block group is cached.
6231                  */
6232                 if (block_group && block_group_bits(block_group, flags) &&
6233                     block_group->cached != BTRFS_CACHE_NO) {
6234                         down_read(&space_info->groups_sem);
6235                         if (list_empty(&block_group->list) ||
6236                             block_group->ro) {
6237                                 /*
6238                                  * someone is removing this block group,
6239                                  * we can't jump into the have_block_group
6240                                  * target because our list pointers are not
6241                                  * valid
6242                                  */
6243                                 btrfs_put_block_group(block_group);
6244                                 up_read(&space_info->groups_sem);
6245                         } else {
6246                                 index = get_block_group_index(block_group);
6247                                 goto have_block_group;
6248                         }
6249                 } else if (block_group) {
6250                         btrfs_put_block_group(block_group);
6251                 }
6252         }
6253 search:
6254         have_caching_bg = false;
6255         down_read(&space_info->groups_sem);
6256         list_for_each_entry(block_group, &space_info->block_groups[index],
6257                             list) {
6258                 u64 offset;
6259                 int cached;
6260
6261                 btrfs_get_block_group(block_group);
6262                 search_start = block_group->key.objectid;
6263
6264                 /*
6265                  * this can happen if we end up cycling through all the
6266                  * raid types, but we want to make sure we only allocate
6267                  * for the proper type.
6268                  */
6269                 if (!block_group_bits(block_group, flags)) {
6270                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6271                                     BTRFS_BLOCK_GROUP_RAID1 |
6272                                     BTRFS_BLOCK_GROUP_RAID5 |
6273                                     BTRFS_BLOCK_GROUP_RAID6 |
6274                                     BTRFS_BLOCK_GROUP_RAID10;
6275
6276                         /*
6277                          * if they asked for extra copies and this block group
6278                          * doesn't provide them, bail.  This does allow us to
6279                          * fill raid0 from raid1.
6280                          */
6281                         if ((flags & extra) && !(block_group->flags & extra))
6282                                 goto loop;
6283                 }
6284
6285 have_block_group:
6286                 cached = block_group_cache_done(block_group);
6287                 if (unlikely(!cached)) {
6288                         ret = cache_block_group(block_group, 0);
6289                         BUG_ON(ret < 0);
6290                         ret = 0;
6291                 }
6292
6293                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6294                         goto loop;
6295                 if (unlikely(block_group->ro))
6296                         goto loop;
6297
6298                 /*
6299                  * Ok, we want to try to use the cluster allocator, so
6300                  * let's look there.
6301                  */
6302                 if (last_ptr) {
6303                         struct btrfs_block_group_cache *used_block_group;
6304                         unsigned long aligned_cluster;
6305                         /*
6306                          * the refill lock keeps out other
6307                          * people trying to start a new cluster
6308                          */
6309                         spin_lock(&last_ptr->refill_lock);
6310                         used_block_group = last_ptr->block_group;
6311                         if (used_block_group != block_group &&
6312                             (!used_block_group ||
6313                              used_block_group->ro ||
6314                              !block_group_bits(used_block_group, flags)))
6315                                 goto refill_cluster;
6316
6317                         if (used_block_group != block_group)
6318                                 btrfs_get_block_group(used_block_group);
6319
6320                         offset = btrfs_alloc_from_cluster(used_block_group,
6321                                                 last_ptr,
6322                                                 num_bytes,
6323                                                 used_block_group->key.objectid,
6324                                                 &max_extent_size);
6325                         if (offset) {
6326                                 /* we have a block, we're done */
6327                                 spin_unlock(&last_ptr->refill_lock);
6328                                 trace_btrfs_reserve_extent_cluster(root,
6329                                                 used_block_group,
6330                                                 search_start, num_bytes);
6331                                 if (used_block_group != block_group) {
6332                                         btrfs_put_block_group(block_group);
6333                                         block_group = used_block_group;
6334                                 }
6335                                 goto checks;
6336                         }
6337
6338                         WARN_ON(last_ptr->block_group != used_block_group);
6339                         if (used_block_group != block_group)
6340                                 btrfs_put_block_group(used_block_group);
6341 refill_cluster:
6342                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6343                          * set up a new cluster, so let's just skip it
6344                          * and let the allocator find whatever block
6345                          * it can find.  If we reach this point, we
6346                          * will have tried the cluster allocator
6347                          * plenty of times and not have found
6348                          * anything, so we are likely way too
6349                          * fragmented for the clustering stuff to find
6350                          * anything.
6351                          *
6352                          * However, if the cluster is taken from the
6353                          * current block group, release the cluster
6354                          * first, so that we stand a better chance of
6355                          * succeeding in the unclustered
6356                          * allocation.  */
6357                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6358                             last_ptr->block_group != block_group) {
6359                                 spin_unlock(&last_ptr->refill_lock);
6360                                 goto unclustered_alloc;
6361                         }
6362
6363                         /*
6364                          * this cluster didn't work out, free it and
6365                          * start over
6366                          */
6367                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6368
6369                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6370                                 spin_unlock(&last_ptr->refill_lock);
6371                                 goto unclustered_alloc;
6372                         }
6373
6374                         aligned_cluster = max_t(unsigned long,
6375                                                 empty_cluster + empty_size,
6376                                               block_group->full_stripe_len);
6377
6378                         /* allocate a cluster in this block group */
6379                         ret = btrfs_find_space_cluster(root, block_group,
6380                                                        last_ptr, search_start,
6381                                                        num_bytes,
6382                                                        aligned_cluster);
6383                         if (ret == 0) {
6384                                 /*
6385                                  * now pull our allocation out of this
6386                                  * cluster
6387                                  */
6388                                 offset = btrfs_alloc_from_cluster(block_group,
6389                                                         last_ptr,
6390                                                         num_bytes,
6391                                                         search_start,
6392                                                         &max_extent_size);
6393                                 if (offset) {
6394                                         /* we found one, proceed */
6395                                         spin_unlock(&last_ptr->refill_lock);
6396                                         trace_btrfs_reserve_extent_cluster(root,
6397                                                 block_group, search_start,
6398                                                 num_bytes);
6399                                         goto checks;
6400                                 }
6401                         } else if (!cached && loop > LOOP_CACHING_NOWAIT &&
6402                                    !failed_cluster_refill) {
6403                                 spin_unlock(&last_ptr->refill_lock);
6404
6405                                 failed_cluster_refill = true;
6406                                 wait_block_group_cache_progress(block_group,
6407                                        num_bytes + empty_cluster + empty_size);
6408                                 goto have_block_group;
6409                         }
6410
6411                         /*
6412                          * at this point we either didn't find a cluster
6413                          * or we weren't able to allocate a block from our
6414                          * cluster.  Free the cluster we've been trying
6415                          * to use, and go to the next block group
6416                          */
6417                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6418                         spin_unlock(&last_ptr->refill_lock);
6419                         goto loop;
6420                 }
6421
6422 unclustered_alloc:
6423                 spin_lock(&block_group->free_space_ctl->tree_lock);
6424                 if (cached &&
6425                     block_group->free_space_ctl->free_space <
6426                     num_bytes + empty_cluster + empty_size) {
6427                         if (block_group->free_space_ctl->free_space >
6428                             max_extent_size)
6429                                 max_extent_size =
6430                                         block_group->free_space_ctl->free_space;
6431                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6432                         goto loop;
6433                 }
6434                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6435
6436                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6437                                                     num_bytes, empty_size,
6438                                                     &max_extent_size);
6439                 /*
6440                  * If we didn't find a chunk, and we haven't failed on this
6441                  * block group before, and this block group is in the middle of
6442                  * caching and we are ok with waiting, then go ahead and wait
6443                  * for progress to be made, and set failed_alloc to true.
6444                  *
6445                  * If failed_alloc is true then we've already waited on this
6446                  * block group once and should move on to the next block group.
6447                  */
6448                 if (!offset && !failed_alloc && !cached &&
6449                     loop > LOOP_CACHING_NOWAIT) {
6450                         wait_block_group_cache_progress(block_group,
6451                                                 num_bytes + empty_size);
6452                         failed_alloc = true;
6453                         goto have_block_group;
6454                 } else if (!offset) {
6455                         if (!cached)
6456                                 have_caching_bg = true;
6457                         goto loop;
6458                 }
6459 checks:
6460                 search_start = stripe_align(root, block_group,
6461                                             offset, num_bytes);
6462
6463                 /* move on to the next group */
6464                 if (search_start + num_bytes >
6465                     block_group->key.objectid + block_group->key.offset) {
6466                         btrfs_add_free_space(block_group, offset, num_bytes);
6467                         goto loop;
6468                 }
6469
6470                 if (offset < search_start)
6471                         btrfs_add_free_space(block_group, offset,
6472                                              search_start - offset);
6473                 BUG_ON(offset > search_start);
6474
6475                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
6476                                                   alloc_type);
6477                 if (ret == -EAGAIN) {
6478                         btrfs_add_free_space(block_group, offset, num_bytes);
6479                         goto loop;
6480                 }
6481
6482                 /* we are all good, lets return */
6483                 ins->objectid = search_start;
6484                 ins->offset = num_bytes;
6485
6486                 trace_btrfs_reserve_extent(orig_root, block_group,
6487                                            search_start, num_bytes);
6488                 btrfs_put_block_group(block_group);
6489                 break;
6490 loop:
6491                 failed_cluster_refill = false;
6492                 failed_alloc = false;
6493                 BUG_ON(index != get_block_group_index(block_group));
6494                 btrfs_put_block_group(block_group);
6495         }
6496         up_read(&space_info->groups_sem);
6497
6498         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6499                 goto search;
6500
6501         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6502                 goto search;
6503
6504         /*
6505          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6506          *                      caching kthreads as we move along
6507          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6508          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6509          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6510          *                      again
6511          */
6512         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6513                 index = 0;
6514                 loop++;
6515                 if (loop == LOOP_ALLOC_CHUNK) {
6516                         struct btrfs_trans_handle *trans;
6517
6518                         trans = btrfs_join_transaction(root);
6519                         if (IS_ERR(trans)) {
6520                                 ret = PTR_ERR(trans);
6521                                 goto out;
6522                         }
6523
6524                         ret = do_chunk_alloc(trans, root, flags,
6525                                              CHUNK_ALLOC_FORCE);
6526                         /*
6527                          * Do not bail out on ENOSPC since we can
6528                          * still retry without an empty size/cluster.
6529                          */
6530                         if (ret < 0 && ret != -ENOSPC)
6531                                 btrfs_abort_transaction(trans,
6532                                                         root, ret);
6533                         else
6534                                 ret = 0;
6535                         btrfs_end_transaction(trans, root);
6536                         if (ret)
6537                                 goto out;
6538                 }
6539
6540                 if (loop == LOOP_NO_EMPTY_SIZE) {
6541                         empty_size = 0;
6542                         empty_cluster = 0;
6543                 }
6544
6545                 goto search;
6546         } else if (!ins->objectid) {
6547                 ret = -ENOSPC;
6548         } else {
6549                 ret = 0;
6550         }
6551 out:
6552         if (ret == -ENOSPC)
6553                 ins->offset = max_extent_size;
6554         return ret;
6555 }
6556
6557 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6558                             int dump_block_groups)
6559 {
6560         struct btrfs_block_group_cache *cache;
6561         int index = 0;
6562
6563         spin_lock(&info->lock);
6564         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
6565                info->flags,
6566                info->total_bytes - info->bytes_used - info->bytes_pinned -
6567                info->bytes_reserved - info->bytes_readonly,
6568                (info->full) ? "" : "not ");
6569         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
6570                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6571                info->total_bytes, info->bytes_used, info->bytes_pinned,
6572                info->bytes_reserved, info->bytes_may_use,
6573                info->bytes_readonly);
6574         spin_unlock(&info->lock);
6575
6576         if (!dump_block_groups)
6577                 return;
6578
6579         down_read(&info->groups_sem);
6580 again:
6581         list_for_each_entry(cache, &info->block_groups[index], list) {
6582                 spin_lock(&cache->lock);
6583                 printk(KERN_INFO "BTRFS: "
6584                            "block group %llu has %llu bytes, "
6585                            "%llu used %llu pinned %llu reserved %s\n",
6586                        cache->key.objectid, cache->key.offset,
6587                        btrfs_block_group_used(&cache->item), cache->pinned,
6588                        cache->reserved, cache->ro ? "[readonly]" : "");
6589                 btrfs_dump_free_space(cache, bytes);
6590                 spin_unlock(&cache->lock);
6591         }
6592         if (++index < BTRFS_NR_RAID_TYPES)
6593                 goto again;
6594         up_read(&info->groups_sem);
6595 }
6596
6597 int btrfs_reserve_extent(struct btrfs_root *root,
6598                          u64 num_bytes, u64 min_alloc_size,
6599                          u64 empty_size, u64 hint_byte,
6600                          struct btrfs_key *ins, int is_data)
6601 {
6602         bool final_tried = false;
6603         u64 flags;
6604         int ret;
6605
6606         flags = btrfs_get_alloc_profile(root, is_data);
6607 again:
6608         WARN_ON(num_bytes < root->sectorsize);
6609         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6610                                flags);
6611
6612         if (ret == -ENOSPC) {
6613                 if (!final_tried && ins->offset) {
6614                         num_bytes = min(num_bytes >> 1, ins->offset);
6615                         num_bytes = round_down(num_bytes, root->sectorsize);
6616                         num_bytes = max(num_bytes, min_alloc_size);
6617                         if (num_bytes == min_alloc_size)
6618                                 final_tried = true;
6619                         goto again;
6620                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6621                         struct btrfs_space_info *sinfo;
6622
6623                         sinfo = __find_space_info(root->fs_info, flags);
6624                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6625                                 flags, num_bytes);
6626                         if (sinfo)
6627                                 dump_space_info(sinfo, num_bytes, 1);
6628                 }
6629         }
6630
6631         return ret;
6632 }
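
/*
 * A rough model of the -ENOSPC retry policy above, for illustration only
 * (next_alloc_size is a hypothetical helper, not part of the allocator):
 * each retry asks for half the previous size, clamped to the largest free
 * extent seen, rounded down to the sectorsize and never below
 * min_alloc_size.  With a 4K sectorsize, min_alloc_size = 64K and a
 * largest free extent of 300K, a 1M request retries
 * 300K -> 148K -> 72K -> 64K, and the 64K try is the final one.
 */
static u64 __maybe_unused next_alloc_size(u64 num_bytes, u64 max_free,
                                          u64 min_alloc_size, u32 sectorsize)
{
        u64 next = min(num_bytes >> 1, max_free);

        /* keep the request block aligned, but never below the floor */
        next = round_down(next, sectorsize);
        return max(next, min_alloc_size);
}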
6633
6634 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6635                                         u64 start, u64 len, int pin)
6636 {
6637         struct btrfs_block_group_cache *cache;
6638         int ret = 0;
6639
6640         cache = btrfs_lookup_block_group(root->fs_info, start);
6641         if (!cache) {
6642                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6643                         start);
6644                 return -ENOSPC;
6645         }
6646
6647         if (btrfs_test_opt(root, DISCARD))
6648                 ret = btrfs_discard_extent(root, start, len, NULL);
6649
6650         if (pin)
6651                 pin_down_extent(root, cache, start, len, 1);
6652         else {
6653                 btrfs_add_free_space(cache, start, len);
6654                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6655         }
6656         btrfs_put_block_group(cache);
6657
6658         trace_btrfs_reserved_extent_free(root, start, len);
6659
6660         return ret;
6661 }
6662
6663 int btrfs_free_reserved_extent(struct btrfs_root *root,
6664                                         u64 start, u64 len)
6665 {
6666         return __btrfs_free_reserved_extent(root, start, len, 0);
6667 }
6668
6669 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6670                                        u64 start, u64 len)
6671 {
6672         return __btrfs_free_reserved_extent(root, start, len, 1);
6673 }
6674
6675 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6676                                       struct btrfs_root *root,
6677                                       u64 parent, u64 root_objectid,
6678                                       u64 flags, u64 owner, u64 offset,
6679                                       struct btrfs_key *ins, int ref_mod)
6680 {
6681         int ret;
6682         struct btrfs_fs_info *fs_info = root->fs_info;
6683         struct btrfs_extent_item *extent_item;
6684         struct btrfs_extent_inline_ref *iref;
6685         struct btrfs_path *path;
6686         struct extent_buffer *leaf;
6687         int type;
6688         u32 size;
6689
6690         if (parent > 0)
6691                 type = BTRFS_SHARED_DATA_REF_KEY;
6692         else
6693                 type = BTRFS_EXTENT_DATA_REF_KEY;
6694
6695         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6696
6697         path = btrfs_alloc_path();
6698         if (!path)
6699                 return -ENOMEM;
6700
6701         path->leave_spinning = 1;
6702         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6703                                       ins, size);
6704         if (ret) {
6705                 btrfs_free_path(path);
6706                 return ret;
6707         }
6708
6709         leaf = path->nodes[0];
6710         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6711                                      struct btrfs_extent_item);
6712         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6713         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6714         btrfs_set_extent_flags(leaf, extent_item,
6715                                flags | BTRFS_EXTENT_FLAG_DATA);
6716
6717         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6718         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6719         if (parent > 0) {
6720                 struct btrfs_shared_data_ref *ref;
6721                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6722                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6723                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6724         } else {
6725                 struct btrfs_extent_data_ref *ref;
6726                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6727                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6728                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6729                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6730                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6731         }
6732
6733         btrfs_mark_buffer_dirty(leaf);
6734         btrfs_free_path(path);
6735
6736         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6737         if (ret) { /* -ENOENT, logic error */
6738                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6739                         ins->objectid, ins->offset);
6740                 BUG();
6741         }
6742         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6743         return ret;
6744 }
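
/*
 * A sketch of the on-disk layout written by alloc_reserved_file_extent.
 * For a shared data ref (parent > 0) the inline ref's offset field carries
 * the parent bytenr and a btrfs_shared_data_ref (just a refcount) follows;
 * for a keyed ref the btrfs_extent_data_ref overlays the inline ref's
 * offset field:
 *
 *   shared: [btrfs_extent_item][type][parent][shared_data_ref.count]
 *   keyed:  [btrfs_extent_item][type][extent_data_ref: root, objectid,
 *                                     offset, count]
 */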
6745
6746 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6747                                      struct btrfs_root *root,
6748                                      u64 parent, u64 root_objectid,
6749                                      u64 flags, struct btrfs_disk_key *key,
6750                                      int level, struct btrfs_key *ins)
6751 {
6752         int ret;
6753         struct btrfs_fs_info *fs_info = root->fs_info;
6754         struct btrfs_extent_item *extent_item;
6755         struct btrfs_tree_block_info *block_info;
6756         struct btrfs_extent_inline_ref *iref;
6757         struct btrfs_path *path;
6758         struct extent_buffer *leaf;
6759         u32 size = sizeof(*extent_item) + sizeof(*iref);
6760         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6761                                                  SKINNY_METADATA);
6762
6763         if (!skinny_metadata)
6764                 size += sizeof(*block_info);
6765
6766         path = btrfs_alloc_path();
6767         if (!path) {
6768                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
6769                                                    root->leafsize);
6770                 return -ENOMEM;
6771         }
6772
6773         path->leave_spinning = 1;
6774         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6775                                       ins, size);
6776         if (ret) {
6777                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
6778                                                    root->leafsize);
6779                 btrfs_free_path(path);
6780                 return ret;
6781         }
6782
6783         leaf = path->nodes[0];
6784         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6785                                      struct btrfs_extent_item);
6786         btrfs_set_extent_refs(leaf, extent_item, 1);
6787         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6788         btrfs_set_extent_flags(leaf, extent_item,
6789                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6790
6791         if (skinny_metadata) {
6792                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6793         } else {
6794                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6795                 btrfs_set_tree_block_key(leaf, block_info, key);
6796                 btrfs_set_tree_block_level(leaf, block_info, level);
6797                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6798         }
6799
6800         if (parent > 0) {
6801                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6802                 btrfs_set_extent_inline_ref_type(leaf, iref,
6803                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6804                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6805         } else {
6806                 btrfs_set_extent_inline_ref_type(leaf, iref,
6807                                                  BTRFS_TREE_BLOCK_REF_KEY);
6808                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6809         }
6810
6811         btrfs_mark_buffer_dirty(leaf);
6812         btrfs_free_path(path);
6813
6814         ret = update_block_group(root, ins->objectid, root->leafsize, 1);
6815         if (ret) { /* -ENOENT, logic error */
6816                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6817                         ins->objectid, (u64)root->leafsize);
6818                 BUG();
6819         }
6820
6821         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->leafsize);
6822         return ret;
6823 }
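
/*
 * The two tree block layouts built above, side by side.  With the
 * SKINNY_METADATA incompat flag the extent key is (bytenr,
 * BTRFS_METADATA_ITEM_KEY, level) and the tree_block_info is omitted,
 * saving sizeof(struct btrfs_tree_block_info) bytes per extent item:
 *
 *   skinny:  [btrfs_extent_item][inline ref]
 *   classic: [btrfs_extent_item][tree_block_info: key, level][inline ref]
 */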
6824
6825 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6826                                      struct btrfs_root *root,
6827                                      u64 root_objectid, u64 owner,
6828                                      u64 offset, struct btrfs_key *ins)
6829 {
6830         int ret;
6831
6832         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6833
6834         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6835                                          ins->offset, 0,
6836                                          root_objectid, owner, offset,
6837                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6838         return ret;
6839 }
6840
6841 /*
6842  * this is used by the tree logging recovery code.  It records that
6843  * an extent has been allocated and makes sure to clear the free
6844  * space cache bits as well.
6845  */
6846 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6847                                    struct btrfs_root *root,
6848                                    u64 root_objectid, u64 owner, u64 offset,
6849                                    struct btrfs_key *ins)
6850 {
6851         int ret;
6852         struct btrfs_block_group_cache *block_group;
6853
6854         /*
6855          * Mixed block groups have their extents excluded before the log is
6856          * processed, so we only need the exclude dance if this fs isn't mixed.
6857          */
6858         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
6859                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
6860                 if (ret)
6861                         return ret;
6862         }
6863
6864         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6865         if (!block_group)
6866                 return -EINVAL;
6867
6868         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6869                                           RESERVE_ALLOC_NO_ACCOUNT);
6870         BUG_ON(ret); /* logic error */
6871         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6872                                          0, owner, offset, ins, 1);
6873         btrfs_put_block_group(block_group);
6874         return ret;
6875 }
6876
6877 static struct extent_buffer *
6878 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6879                       u64 bytenr, u32 blocksize, int level)
6880 {
6881         struct extent_buffer *buf;
6882
6883         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6884         if (!buf)
6885                 return ERR_PTR(-ENOMEM);
6886         btrfs_set_header_generation(buf, trans->transid);
6887         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6888         btrfs_tree_lock(buf);
6889         clean_tree_block(trans, root, buf);
6890         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6891
6892         btrfs_set_lock_blocking(buf);
6893         btrfs_set_buffer_uptodate(buf);
6894
6895         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6896                 /*
6897                  * we allow two log transactions at a time, so use different
6898                  * EXTENT bits to differentiate the dirty pages.
6899                  */
6900                 if (root->log_transid % 2 == 0)
6901                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6902                                         buf->start + buf->len - 1, GFP_NOFS);
6903                 else
6904                         set_extent_new(&root->dirty_log_pages, buf->start,
6905                                         buf->start + buf->len - 1, GFP_NOFS);
6906         } else {
6907                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6908                          buf->start + buf->len - 1, GFP_NOFS);
6909         }
6910         trans->blocks_used++;
6911         /* this returns a buffer locked for blocking */
6912         return buf;
6913 }
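
/*
 * A worked example of the log transid trick above: two log transactions
 * may be in flight at once, so blocks born under an even log_transid are
 * tracked with the dirty bit and blocks born under an odd one with the
 * new bit, letting each log commit find and clear only its own pages.
 */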
6914
6915 static struct btrfs_block_rsv *
6916 use_block_rsv(struct btrfs_trans_handle *trans,
6917               struct btrfs_root *root, u32 blocksize)
6918 {
6919         struct btrfs_block_rsv *block_rsv;
6920         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6921         int ret;
6922         bool global_updated = false;
6923
6924         block_rsv = get_block_rsv(trans, root);
6925
6926         if (unlikely(block_rsv->size == 0))
6927                 goto try_reserve;
6928 again:
6929         ret = block_rsv_use_bytes(block_rsv, blocksize);
6930         if (!ret)
6931                 return block_rsv;
6932
6933         if (block_rsv->failfast)
6934                 return ERR_PTR(ret);
6935
6936         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
6937                 global_updated = true;
6938                 update_global_block_rsv(root->fs_info);
6939                 goto again;
6940         }
6941
6942         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6943                 static DEFINE_RATELIMIT_STATE(_rs,
6944                                 DEFAULT_RATELIMIT_INTERVAL * 10,
6945                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
6946                 if (__ratelimit(&_rs))
6947                         WARN(1, KERN_DEBUG
6948                                 "BTRFS: block rsv returned %d\n", ret);
6949         }
6950 try_reserve:
6951         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6952                                      BTRFS_RESERVE_NO_FLUSH);
6953         if (!ret)
6954                 return block_rsv;
6955         /*
6956          * If we couldn't reserve metadata bytes, try to use some from the
6957          * global reserve, provided this rsv's space_info is the same as the
6958          * global reserve's.
6959          */
6960         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
6961             block_rsv->space_info == global_rsv->space_info) {
6962                 ret = block_rsv_use_bytes(global_rsv, blocksize);
6963                 if (!ret)
6964                         return global_rsv;
6965         }
6966         return ERR_PTR(ret);
6967 }
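
/*
 * Reading aid for use_block_rsv's fallback order (a summary, not new
 * behaviour): 1) take blocksize bytes from the root's block rsv; 2) if
 * that rsv is the global one, refresh it once and retry; 3) reserve fresh
 * metadata bytes with BTRFS_RESERVE_NO_FLUSH; 4) as a last resort, steal
 * from the global reserve when it backs the same space_info.
 */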
6968
6969 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6970                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6971 {
6972         block_rsv_add_bytes(block_rsv, blocksize, 0);
6973         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6974 }
6975
6976 /*
6977  * finds a free extent and does all the dirty work required for
6978  * allocation, then initializes a locked tree buffer for the first
6979  * block of the extent.
6980  *
6981  * returns the tree buffer or an ERR_PTR on failure.
6982  */
6983 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6984                                         struct btrfs_root *root, u32 blocksize,
6985                                         u64 parent, u64 root_objectid,
6986                                         struct btrfs_disk_key *key, int level,
6987                                         u64 hint, u64 empty_size)
6988 {
6989         struct btrfs_key ins;
6990         struct btrfs_block_rsv *block_rsv;
6991         struct extent_buffer *buf;
6992         u64 flags = 0;
6993         int ret;
6994         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6995                                                  SKINNY_METADATA);
6996
6997         block_rsv = use_block_rsv(trans, root, blocksize);
6998         if (IS_ERR(block_rsv))
6999                 return ERR_CAST(block_rsv);
7000
7001         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7002                                    empty_size, hint, &ins, 0);
7003         if (ret) {
7004                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7005                 return ERR_PTR(ret);
7006         }
7007
7008         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
7009                                     blocksize, level);
7010         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
7011
7012         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7013                 if (parent == 0)
7014                         parent = ins.objectid;
7015                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7016         } else
7017                 BUG_ON(parent > 0);
7018
7019         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7020                 struct btrfs_delayed_extent_op *extent_op;
7021                 extent_op = btrfs_alloc_delayed_extent_op();
7022                 BUG_ON(!extent_op); /* -ENOMEM */
7023                 if (key)
7024                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7025                 else
7026                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7027                 extent_op->flags_to_set = flags;
7028                 if (skinny_metadata)
7029                         extent_op->update_key = 0;
7030                 else
7031                         extent_op->update_key = 1;
7032                 extent_op->update_flags = 1;
7033                 extent_op->is_data = 0;
7034                 extent_op->level = level;
7035
7036                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7037                                         ins.objectid,
7038                                         ins.offset, parent, root_objectid,
7039                                         level, BTRFS_ADD_DELAYED_EXTENT,
7040                                         extent_op, 0);
7041                 BUG_ON(ret); /* -ENOMEM */
7042         }
7043         return buf;
7044 }
7045
7046 struct walk_control {
7047         u64 refs[BTRFS_MAX_LEVEL];
7048         u64 flags[BTRFS_MAX_LEVEL];
7049         struct btrfs_key update_progress;
7050         int stage;
7051         int level;
7052         int shared_level;
7053         int update_ref;
7054         int keep_locks;
7055         int reada_slot;
7056         int reada_count;
7057         int for_reloc;
7058 };
7059
7060 #define DROP_REFERENCE  1
7061 #define UPDATE_BACKREF  2
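
/*
 * The walk below alternates between these two stages: DROP_REFERENCE
 * drops refs while descending through blocks owned solely by this tree,
 * and flips to UPDATE_BACKREF when it meets a shared subtree whose
 * backrefs still need converting; once that subtree has been processed,
 * walk_up_proc flips the stage back to DROP_REFERENCE.
 */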
7062
7063 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7064                                      struct btrfs_root *root,
7065                                      struct walk_control *wc,
7066                                      struct btrfs_path *path)
7067 {
7068         u64 bytenr;
7069         u64 generation;
7070         u64 refs;
7071         u64 flags;
7072         u32 nritems;
7073         u32 blocksize;
7074         struct btrfs_key key;
7075         struct extent_buffer *eb;
7076         int ret;
7077         int slot;
7078         int nread = 0;
7079
7080         if (path->slots[wc->level] < wc->reada_slot) {
7081                 wc->reada_count = wc->reada_count * 2 / 3;
7082                 wc->reada_count = max(wc->reada_count, 2);
7083         } else {
7084                 wc->reada_count = wc->reada_count * 3 / 2;
7085                 wc->reada_count = min_t(int, wc->reada_count,
7086                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7087         }
7088
7089         eb = path->nodes[wc->level];
7090         nritems = btrfs_header_nritems(eb);
7091         blocksize = btrfs_level_size(root, wc->level - 1);
7092
7093         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7094                 if (nread >= wc->reada_count)
7095                         break;
7096
7097                 cond_resched();
7098                 bytenr = btrfs_node_blockptr(eb, slot);
7099                 generation = btrfs_node_ptr_generation(eb, slot);
7100
7101                 if (slot == path->slots[wc->level])
7102                         goto reada;
7103
7104                 if (wc->stage == UPDATE_BACKREF &&
7105                     generation <= root->root_key.offset)
7106                         continue;
7107
7108                 /* We don't lock the tree block, it's OK to be racy here */
7109                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7110                                                wc->level - 1, 1, &refs,
7111                                                &flags);
7112                 /* We don't care about errors in readahead. */
7113                 if (ret < 0)
7114                         continue;
7115                 BUG_ON(refs == 0);
7116
7117                 if (wc->stage == DROP_REFERENCE) {
7118                         if (refs == 1)
7119                                 goto reada;
7120
7121                         if (wc->level == 1 &&
7122                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7123                                 continue;
7124                         if (!wc->update_ref ||
7125                             generation <= root->root_key.offset)
7126                                 continue;
7127                         btrfs_node_key_to_cpu(eb, &key, slot);
7128                         ret = btrfs_comp_cpu_keys(&key,
7129                                                   &wc->update_progress);
7130                         if (ret < 0)
7131                                 continue;
7132                 } else {
7133                         if (wc->level == 1 &&
7134                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7135                                 continue;
7136                 }
7137 reada:
7138                 ret = readahead_tree_block(root, bytenr, blocksize,
7139                                            generation);
7140                 if (ret)
7141                         break;
7142                 nread++;
7143         }
7144         wc->reada_slot = slot;
7145 }
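
/*
 * The readahead window above adapts multiplicatively: while the walk is
 * still inside the previously read-ahead window, reada_count shrinks to
 * 2/3 (floor of 2); once it runs past the window, it grows by 3/2,
 * capped at the node fanout.  E.g. starting from 32 pointers, two
 * forward rounds read ahead 48 and then 72 blocks.
 */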
7146
7147 /*
7148  * helper to process tree block while walking down the tree.
7149  *
7150  * when wc->stage == UPDATE_BACKREF, this function updates
7151  * back refs for pointers in the block.
7152  *
7153  * NOTE: return value 1 means we should stop walking down.
7154  */
7155 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7156                                    struct btrfs_root *root,
7157                                    struct btrfs_path *path,
7158                                    struct walk_control *wc, int lookup_info)
7159 {
7160         int level = wc->level;
7161         struct extent_buffer *eb = path->nodes[level];
7162         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7163         int ret;
7164
7165         if (wc->stage == UPDATE_BACKREF &&
7166             btrfs_header_owner(eb) != root->root_key.objectid)
7167                 return 1;
7168
7169         /*
7170          * when the reference count of a tree block is 1, it won't increase
7171          * again. once the full backref flag is set, we never clear it.
7172          */
7173         if (lookup_info &&
7174             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7175              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7176                 BUG_ON(!path->locks[level]);
7177                 ret = btrfs_lookup_extent_info(trans, root,
7178                                                eb->start, level, 1,
7179                                                &wc->refs[level],
7180                                                &wc->flags[level]);
7181                 BUG_ON(ret == -ENOMEM);
7182                 if (ret)
7183                         return ret;
7184                 BUG_ON(wc->refs[level] == 0);
7185         }
7186
7187         if (wc->stage == DROP_REFERENCE) {
7188                 if (wc->refs[level] > 1)
7189                         return 1;
7190
7191                 if (path->locks[level] && !wc->keep_locks) {
7192                         btrfs_tree_unlock_rw(eb, path->locks[level]);
7193                         path->locks[level] = 0;
7194                 }
7195                 return 0;
7196         }
7197
7198         /* wc->stage == UPDATE_BACKREF */
7199         if (!(wc->flags[level] & flag)) {
7200                 BUG_ON(!path->locks[level]);
7201                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
7202                 BUG_ON(ret); /* -ENOMEM */
7203                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
7204                 BUG_ON(ret); /* -ENOMEM */
7205                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7206                                                   eb->len, flag,
7207                                                   btrfs_header_level(eb), 0);
7208                 BUG_ON(ret); /* -ENOMEM */
7209                 wc->flags[level] |= flag;
7210         }
7211
7212         /*
7213          * the block is shared by multiple trees, so it's not good to
7214          * keep the tree lock
7215          */
7216         if (path->locks[level] && level > 0) {
7217                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7218                 path->locks[level] = 0;
7219         }
7220         return 0;
7221 }
7222
7223 /*
7224  * helper to process tree block pointer.
7225  *
7226  * when wc->stage == DROP_REFERENCE, this function checks
7227  * reference count of the block pointed to. if the block
7228  * is shared and we need to update back refs for the subtree
7229  * rooted at the block, this function changes wc->stage to
7230  * UPDATE_BACKREF. if the block is shared and there is no
7231  * need to update back refs, this function drops the reference
7232  * to the block.
7233  *
7234  * NOTE: return value 1 means we should stop walking down.
7235  */
7236 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7237                                  struct btrfs_root *root,
7238                                  struct btrfs_path *path,
7239                                  struct walk_control *wc, int *lookup_info)
7240 {
7241         u64 bytenr;
7242         u64 generation;
7243         u64 parent;
7244         u32 blocksize;
7245         struct btrfs_key key;
7246         struct extent_buffer *next;
7247         int level = wc->level;
7248         int reada = 0;
7249         int ret = 0;
7250
7251         generation = btrfs_node_ptr_generation(path->nodes[level],
7252                                                path->slots[level]);
7253         /*
7254          * if the lower level block was created before the snapshot
7255          * was created, we know there is no need to update back refs
7256          * for the subtree
7257          */
7258         if (wc->stage == UPDATE_BACKREF &&
7259             generation <= root->root_key.offset) {
7260                 *lookup_info = 1;
7261                 return 1;
7262         }
7263
7264         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7265         blocksize = btrfs_level_size(root, level - 1);
7266
7267         next = btrfs_find_tree_block(root, bytenr, blocksize);
7268         if (!next) {
7269                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7270                 if (!next)
7271                         return -ENOMEM;
7272                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7273                                                level - 1);
7274                 reada = 1;
7275         }
7276         btrfs_tree_lock(next);
7277         btrfs_set_lock_blocking(next);
7278
7279         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7280                                        &wc->refs[level - 1],
7281                                        &wc->flags[level - 1]);
7282         if (ret < 0) {
7283                 btrfs_tree_unlock(next);
7284                 return ret;
7285         }
7286
7287         if (unlikely(wc->refs[level - 1] == 0)) {
7288                 btrfs_err(root->fs_info, "Missing references.");
7289                 BUG();
7290         }
7291         *lookup_info = 0;
7292
7293         if (wc->stage == DROP_REFERENCE) {
7294                 if (wc->refs[level - 1] > 1) {
7295                         if (level == 1 &&
7296                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7297                                 goto skip;
7298
7299                         if (!wc->update_ref ||
7300                             generation <= root->root_key.offset)
7301                                 goto skip;
7302
7303                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7304                                               path->slots[level]);
7305                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7306                         if (ret < 0)
7307                                 goto skip;
7308
7309                         wc->stage = UPDATE_BACKREF;
7310                         wc->shared_level = level - 1;
7311                 }
7312         } else {
7313                 if (level == 1 &&
7314                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7315                         goto skip;
7316         }
7317
7318         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7319                 btrfs_tree_unlock(next);
7320                 free_extent_buffer(next);
7321                 next = NULL;
7322                 *lookup_info = 1;
7323         }
7324
7325         if (!next) {
7326                 if (reada && level == 1)
7327                         reada_walk_down(trans, root, wc, path);
7328                 next = read_tree_block(root, bytenr, blocksize, generation);
7329                 if (!next || !extent_buffer_uptodate(next)) {
7330                         free_extent_buffer(next);
7331                         return -EIO;
7332                 }
7333                 btrfs_tree_lock(next);
7334                 btrfs_set_lock_blocking(next);
7335         }
7336
7337         level--;
7338         BUG_ON(level != btrfs_header_level(next));
7339         path->nodes[level] = next;
7340         path->slots[level] = 0;
7341         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7342         wc->level = level;
7343         if (wc->level == 1)
7344                 wc->reada_slot = 0;
7345         return 0;
7346 skip:
7347         wc->refs[level - 1] = 0;
7348         wc->flags[level - 1] = 0;
7349         if (wc->stage == DROP_REFERENCE) {
7350                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7351                         parent = path->nodes[level]->start;
7352                 } else {
7353                         BUG_ON(root->root_key.objectid !=
7354                                btrfs_header_owner(path->nodes[level]));
7355                         parent = 0;
7356                 }
7357
7358                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7359                                 root->root_key.objectid, level - 1, 0, 0);
7360                 BUG_ON(ret); /* -ENOMEM */
7361         }
7362         btrfs_tree_unlock(next);
7363         free_extent_buffer(next);
7364         *lookup_info = 1;
7365         return 1;
7366 }
7367
7368 /*
7369  * helper to process tree block while walking up the tree.
7370  *
7371  * when wc->stage == DROP_REFERENCE, this function drops
7372  * reference count on the block.
7373  *
7374  * when wc->stage == UPDATE_BACKREF, this function changes
7375  * wc->stage back to DROP_REFERENCE if we changed wc->stage
7376  * to UPDATE_BACKREF previously while processing the block.
7377  *
7378  * NOTE: return value 1 means we should stop walking up.
7379  */
7380 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7381                                  struct btrfs_root *root,
7382                                  struct btrfs_path *path,
7383                                  struct walk_control *wc)
7384 {
7385         int ret;
7386         int level = wc->level;
7387         struct extent_buffer *eb = path->nodes[level];
7388         u64 parent = 0;
7389
7390         if (wc->stage == UPDATE_BACKREF) {
7391                 BUG_ON(wc->shared_level < level);
7392                 if (level < wc->shared_level)
7393                         goto out;
7394
7395                 ret = find_next_key(path, level + 1, &wc->update_progress);
7396                 if (ret > 0)
7397                         wc->update_ref = 0;
7398
7399                 wc->stage = DROP_REFERENCE;
7400                 wc->shared_level = -1;
7401                 path->slots[level] = 0;
7402
7403                 /*
7404                  * check reference count again if the block isn't locked.
7405                  * we should start walking down the tree again if reference
7406                  * count is one.
7407                  */
7408                 if (!path->locks[level]) {
7409                         BUG_ON(level == 0);
7410                         btrfs_tree_lock(eb);
7411                         btrfs_set_lock_blocking(eb);
7412                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7413
7414                         ret = btrfs_lookup_extent_info(trans, root,
7415                                                        eb->start, level, 1,
7416                                                        &wc->refs[level],
7417                                                        &wc->flags[level]);
7418                         if (ret < 0) {
7419                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7420                                 path->locks[level] = 0;
7421                                 return ret;
7422                         }
7423                         BUG_ON(wc->refs[level] == 0);
7424                         if (wc->refs[level] == 1) {
7425                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7426                                 path->locks[level] = 0;
7427                                 return 1;
7428                         }
7429                 }
7430         }
7431
7432         /* wc->stage == DROP_REFERENCE */
7433         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7434
7435         if (wc->refs[level] == 1) {
7436                 if (level == 0) {
7437                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7438                                 ret = btrfs_dec_ref(trans, root, eb, 1,
7439                                                     wc->for_reloc);
7440                         else
7441                                 ret = btrfs_dec_ref(trans, root, eb, 0,
7442                                                     wc->for_reloc);
7443                         BUG_ON(ret); /* -ENOMEM */
7444                 }
7445                 /* make block locked assertion in clean_tree_block happy */
7446                 if (!path->locks[level] &&
7447                     btrfs_header_generation(eb) == trans->transid) {
7448                         btrfs_tree_lock(eb);
7449                         btrfs_set_lock_blocking(eb);
7450                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7451                 }
7452                 clean_tree_block(trans, root, eb);
7453         }
7454
7455         if (eb == root->node) {
7456                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7457                         parent = eb->start;
7458                 else
7459                         BUG_ON(root->root_key.objectid !=
7460                                btrfs_header_owner(eb));
7461         } else {
7462                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7463                         parent = path->nodes[level + 1]->start;
7464                 else
7465                         BUG_ON(root->root_key.objectid !=
7466                                btrfs_header_owner(path->nodes[level + 1]));
7467         }
7468
7469         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7470 out:
7471         wc->refs[level] = 0;
7472         wc->flags[level] = 0;
7473         return 0;
7474 }
7475
7476 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7477                                    struct btrfs_root *root,
7478                                    struct btrfs_path *path,
7479                                    struct walk_control *wc)
7480 {
7481         int level = wc->level;
7482         int lookup_info = 1;
7483         int ret;
7484
7485         while (level >= 0) {
7486                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7487                 if (ret > 0)
7488                         break;
7489
7490                 if (level == 0)
7491                         break;
7492
7493                 if (path->slots[level] >=
7494                     btrfs_header_nritems(path->nodes[level]))
7495                         break;
7496
7497                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
7498                 if (ret > 0) {
7499                         path->slots[level]++;
7500                         continue;
7501                 } else if (ret < 0)
7502                         return ret;
7503                 level = wc->level;
7504         }
7505         return 0;
7506 }
7507
7508 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7509                                  struct btrfs_root *root,
7510                                  struct btrfs_path *path,
7511                                  struct walk_control *wc, int max_level)
7512 {
7513         int level = wc->level;
7514         int ret;
7515
7516         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7517         while (level < max_level && path->nodes[level]) {
7518                 wc->level = level;
7519                 if (path->slots[level] + 1 <
7520                     btrfs_header_nritems(path->nodes[level])) {
7521                         path->slots[level]++;
7522                         return 0;
7523                 } else {
7524                         ret = walk_up_proc(trans, root, path, wc);
7525                         if (ret > 0)
7526                                 return 0;
7527
7528                         if (path->locks[level]) {
7529                                 btrfs_tree_unlock_rw(path->nodes[level],
7530                                                      path->locks[level]);
7531                                 path->locks[level] = 0;
7532                         }
7533                         free_extent_buffer(path->nodes[level]);
7534                         path->nodes[level] = NULL;
7535                         level++;
7536                 }
7537         }
7538         return 1;
7539 }
7540
7541 /*
7542  * drop a subvolume tree.
7543  *
7544  * this function traverses the tree freeing any blocks that are only
7545  * referenced by the tree.
7546  *
7547  * when a shared tree block is found, this function decreases its
7548  * reference count by one. if update_ref is true, this function
7549  * also makes sure backrefs for the shared block and all lower level
7550  * blocks are properly updated.
7551  *
7552  * If called with for_reloc == 0, may exit early with -EAGAIN
7553  */
7554 int btrfs_drop_snapshot(struct btrfs_root *root,
7555                          struct btrfs_block_rsv *block_rsv, int update_ref,
7556                          int for_reloc)
7557 {
7558         struct btrfs_path *path;
7559         struct btrfs_trans_handle *trans;
7560         struct btrfs_root *tree_root = root->fs_info->tree_root;
7561         struct btrfs_root_item *root_item = &root->root_item;
7562         struct walk_control *wc;
7563         struct btrfs_key key;
7564         int err = 0;
7565         int ret;
7566         int level;
7567         bool root_dropped = false;
7568
7569         path = btrfs_alloc_path();
7570         if (!path) {
7571                 err = -ENOMEM;
7572                 goto out;
7573         }
7574
7575         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7576         if (!wc) {
7577                 btrfs_free_path(path);
7578                 err = -ENOMEM;
7579                 goto out;
7580         }
7581
7582         trans = btrfs_start_transaction(tree_root, 0);
7583         if (IS_ERR(trans)) {
7584                 err = PTR_ERR(trans);
7585                 goto out_free;
7586         }
7587
7588         if (block_rsv)
7589                 trans->block_rsv = block_rsv;
7590
7591         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7592                 level = btrfs_header_level(root->node);
7593                 path->nodes[level] = btrfs_lock_root_node(root);
7594                 btrfs_set_lock_blocking(path->nodes[level]);
7595                 path->slots[level] = 0;
7596                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7597                 memset(&wc->update_progress, 0,
7598                        sizeof(wc->update_progress));
7599         } else {
7600                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7601                 memcpy(&wc->update_progress, &key,
7602                        sizeof(wc->update_progress));
7603
7604                 level = root_item->drop_level;
7605                 BUG_ON(level == 0);
7606                 path->lowest_level = level;
7607                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7608                 path->lowest_level = 0;
7609                 if (ret < 0) {
7610                         err = ret;
7611                         goto out_end_trans;
7612                 }
7613                 WARN_ON(ret > 0);
7614
7615                 /*
7616                  * unlock our path, this is safe because only this
7617                  * function is allowed to delete this snapshot
7618                  */
7619                 btrfs_unlock_up_safe(path, 0);
7620
7621                 level = btrfs_header_level(root->node);
7622                 while (1) {
7623                         btrfs_tree_lock(path->nodes[level]);
7624                         btrfs_set_lock_blocking(path->nodes[level]);
7625                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7626
7627                         ret = btrfs_lookup_extent_info(trans, root,
7628                                                 path->nodes[level]->start,
7629                                                 level, 1, &wc->refs[level],
7630                                                 &wc->flags[level]);
7631                         if (ret < 0) {
7632                                 err = ret;
7633                                 goto out_end_trans;
7634                         }
7635                         BUG_ON(wc->refs[level] == 0);
7636
7637                         if (level == root_item->drop_level)
7638                                 break;
7639
7640                         btrfs_tree_unlock(path->nodes[level]);
7641                         path->locks[level] = 0;
7642                         WARN_ON(wc->refs[level] != 1);
7643                         level--;
7644                 }
7645         }
7646
7647         wc->level = level;
7648         wc->shared_level = -1;
7649         wc->stage = DROP_REFERENCE;
7650         wc->update_ref = update_ref;
7651         wc->keep_locks = 0;
7652         wc->for_reloc = for_reloc;
7653         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7654
7655         while (1) {
7656
7657                 ret = walk_down_tree(trans, root, path, wc);
7658                 if (ret < 0) {
7659                         err = ret;
7660                         break;
7661                 }
7662
7663                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7664                 if (ret < 0) {
7665                         err = ret;
7666                         break;
7667                 }
7668
7669                 if (ret > 0) {
7670                         BUG_ON(wc->stage != DROP_REFERENCE);
7671                         break;
7672                 }
7673
7674                 if (wc->stage == DROP_REFERENCE) {
7675                         level = wc->level;
7676                         btrfs_node_key(path->nodes[level],
7677                                        &root_item->drop_progress,
7678                                        path->slots[level]);
7679                         root_item->drop_level = level;
7680                 }
7681
7682                 BUG_ON(wc->level == 0);
7683                 if (btrfs_should_end_transaction(trans, tree_root) ||
7684                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
7685                         ret = btrfs_update_root(trans, tree_root,
7686                                                 &root->root_key,
7687                                                 root_item);
7688                         if (ret) {
7689                                 btrfs_abort_transaction(trans, tree_root, ret);
7690                                 err = ret;
7691                                 goto out_end_trans;
7692                         }
7693
7694                         btrfs_end_transaction_throttle(trans, tree_root);
7695                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
7696                                 pr_debug("BTRFS: drop snapshot early exit\n");
7697                                 err = -EAGAIN;
7698                                 goto out_free;
7699                         }
7700
7701                         trans = btrfs_start_transaction(tree_root, 0);
7702                         if (IS_ERR(trans)) {
7703                                 err = PTR_ERR(trans);
7704                                 goto out_free;
7705                         }
7706                         if (block_rsv)
7707                                 trans->block_rsv = block_rsv;
7708                 }
7709         }
7710         btrfs_release_path(path);
7711         if (err)
7712                 goto out_end_trans;
7713
7714         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7715         if (ret) {
7716                 btrfs_abort_transaction(trans, tree_root, ret);
7717                 goto out_end_trans;
7718         }
7719
7720         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7721                 ret = btrfs_find_root(tree_root, &root->root_key, path,
7722                                       NULL, NULL);
7723                 if (ret < 0) {
7724                         btrfs_abort_transaction(trans, tree_root, ret);
7725                         err = ret;
7726                         goto out_end_trans;
7727                 } else if (ret > 0) {
7728                         /* if we fail to delete the orphan item this time
7729                          * around, it'll get picked up the next time.
7730                          *
7731                          * The most common failure here is just -ENOENT.
7732                          */
7733                         btrfs_del_orphan_item(trans, tree_root,
7734                                               root->root_key.objectid);
7735                 }
7736         }
7737
7738         if (root->in_radix) {
7739                 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
7740         } else {
7741                 free_extent_buffer(root->node);
7742                 free_extent_buffer(root->commit_root);
7743                 btrfs_put_fs_root(root);
7744         }
7745         root_dropped = true;
7746 out_end_trans:
7747         btrfs_end_transaction_throttle(trans, tree_root);
7748 out_free:
7749         kfree(wc);
7750         btrfs_free_path(path);
7751 out:
7752         /*
7753          * So if we need to stop dropping the snapshot for whatever reason, we
7754          * need to make sure to add it back to the dead root list so that we
7755          * keep trying to do the work later.  This also cleans up roots if we
7756          * don't have it in the radix (like when we recover after a power fail
7757          * or unmount) so we don't leak memory.
7758          */
7759         if (!for_reloc && !root_dropped)
7760                 btrfs_add_dead_root(root);
7761         if (err && err != -EAGAIN)
7762                 btrfs_std_error(root->fs_info, err);
7763         return err;
7764 }
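
/*
 * Restart sketch for btrfs_drop_snapshot: drop_progress and drop_level
 * are persisted in the root item at every transaction boundary above, so
 * an interrupted drop (e.g. the -EAGAIN early exit) resumes by searching
 * down to the saved key at the saved level instead of restarting from
 * the top of the tree.
 */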
7765
7766 /*
7767  * drop subtree rooted at tree block 'node'.
7768  *
7769  * NOTE: this function will unlock and release tree block 'node'
7770  * only used by relocation code
7771  */
7772 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7773                         struct btrfs_root *root,
7774                         struct extent_buffer *node,
7775                         struct extent_buffer *parent)
7776 {
7777         struct btrfs_path *path;
7778         struct walk_control *wc;
7779         int level;
7780         int parent_level;
7781         int ret = 0;
7782         int wret;
7783
7784         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7785
7786         path = btrfs_alloc_path();
7787         if (!path)
7788                 return -ENOMEM;
7789
7790         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7791         if (!wc) {
7792                 btrfs_free_path(path);
7793                 return -ENOMEM;
7794         }
7795
7796         btrfs_assert_tree_locked(parent);
7797         parent_level = btrfs_header_level(parent);
7798         extent_buffer_get(parent);
7799         path->nodes[parent_level] = parent;
7800         path->slots[parent_level] = btrfs_header_nritems(parent);
7801
7802         btrfs_assert_tree_locked(node);
7803         level = btrfs_header_level(node);
7804         path->nodes[level] = node;
7805         path->slots[level] = 0;
7806         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7807
7808         wc->refs[parent_level] = 1;
7809         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7810         wc->level = level;
7811         wc->shared_level = -1;
7812         wc->stage = DROP_REFERENCE;
7813         wc->update_ref = 0;
7814         wc->keep_locks = 1;
7815         wc->for_reloc = 1;
7816         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7817
7818         while (1) {
7819                 wret = walk_down_tree(trans, root, path, wc);
7820                 if (wret < 0) {
7821                         ret = wret;
7822                         break;
7823                 }
7824
7825                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7826                 if (wret < 0)
7827                         ret = wret;
7828                 if (wret != 0)
7829                         break;
7830         }
7831
7832         kfree(wc);
7833         btrfs_free_path(path);
7834         return ret;
7835 }
7836
7837 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7838 {
7839         u64 num_devices;
7840         u64 stripped;
7841
7842         /*
7843          * if restripe for this chunk_type is on, pick the target profile
7844          * and return; otherwise do the usual balance
7845          */
7846         stripped = get_restripe_target(root->fs_info, flags);
7847         if (stripped)
7848                 return extended_to_chunk(stripped);
7849
7850         /*
7851          * we add in the count of missing devices because we want
7852          * to make sure that any RAID levels on a degraded FS
7853          * continue to be honored.
7854          */
7855         num_devices = root->fs_info->fs_devices->rw_devices +
7856                 root->fs_info->fs_devices->missing_devices;
7857
7858         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7859                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7860                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7861
7862         if (num_devices == 1) {
7863                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7864                 stripped = flags & ~stripped;
7865
7866                 /* turn raid0 into single device chunks */
7867                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7868                         return stripped;
7869
7870                 /* turn mirroring into duplication */
7871                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7872                              BTRFS_BLOCK_GROUP_RAID10))
7873                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7874         } else {
7875                 /* they already had raid on here, just return */
7876                 if (flags & stripped)
7877                         return flags;
7878
7879                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7880                 stripped = flags & ~stripped;
7881
7882                 /* switch duplicated blocks with raid1 */
7883                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7884                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7885
7886                 /* this is drive concat, leave it alone */
7887         }
7888
7889         return flags;
7890 }
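
/*
 * A few concrete reductions performed above, assuming no restripe target
 * is set:
 *   1 rw device:  RAID0 -> single, RAID1/RAID10 -> DUP
 *   >1 device:    DUP -> RAID1; profiles that already stripe or mirror
 *                 are returned unchanged
 */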
7891
7892 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7893 {
7894         struct btrfs_space_info *sinfo = cache->space_info;
7895         u64 num_bytes;
7896         u64 min_allocable_bytes;
7897         int ret = -ENOSPC;
7898
7899
7900         /*
7901          * We need some metadata space and system metadata space for
7902          * allocating chunks in some corner cases, so keep some spare space
7903          * unless we are forced to set the group read-only.
7904          */
7905         if ((sinfo->flags &
7906              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7907             !force)
7908                 min_allocable_bytes = 1 * 1024 * 1024;
7909         else
7910                 min_allocable_bytes = 0;
7911
7912         spin_lock(&sinfo->lock);
7913         spin_lock(&cache->lock);
7914
7915         if (cache->ro) {
7916                 ret = 0;
7917                 goto out;
7918         }
7919
7920         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7921                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7922
7923         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7924             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7925             min_allocable_bytes <= sinfo->total_bytes) {
7926                 sinfo->bytes_readonly += num_bytes;
7927                 cache->ro = 1;
7928                 ret = 0;
7929         }
7930 out:
7931         spin_unlock(&cache->lock);
7932         spin_unlock(&sinfo->lock);
7933         return ret;
7934 }
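
/*
 * The read-only check above as a single inequality over the space_info:
 * the group may go ro only if
 *
 *   used + reserved + pinned + may_use + readonly
 *        + this group's unused bytes + min_allocable <= total
 *
 * i.e. the rest of the space_info can absorb everything this group would
 * stop providing.
 */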
7935
7936 int btrfs_set_block_group_ro(struct btrfs_root *root,
7937                              struct btrfs_block_group_cache *cache)
7938
7939 {
7940         struct btrfs_trans_handle *trans;
7941         u64 alloc_flags;
7942         int ret;
7943
7944         BUG_ON(cache->ro);
7945
7946         trans = btrfs_join_transaction(root);
7947         if (IS_ERR(trans))
7948                 return PTR_ERR(trans);
7949
7950         alloc_flags = update_block_group_flags(root, cache->flags);
7951         if (alloc_flags != cache->flags) {
7952                 ret = do_chunk_alloc(trans, root, alloc_flags,
7953                                      CHUNK_ALLOC_FORCE);
7954                 if (ret < 0)
7955                         goto out;
7956         }
7957
7958         ret = set_block_group_ro(cache, 0);
7959         if (!ret)
7960                 goto out;
7961         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7962         ret = do_chunk_alloc(trans, root, alloc_flags,
7963                              CHUNK_ALLOC_FORCE);
7964         if (ret < 0)
7965                 goto out;
7966         ret = set_block_group_ro(cache, 0);
7967 out:
7968         btrfs_end_transaction(trans, root);
7969         return ret;
7970 }
7971
7972 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7973                             struct btrfs_root *root, u64 type)
7974 {
7975         u64 alloc_flags = get_alloc_profile(root, type);
7976         return do_chunk_alloc(trans, root, alloc_flags,
7977                               CHUNK_ALLOC_FORCE);
7978 }
7979
7980 /*
7981  * helper to account the unused space of all the readonly block groups in
7982  * the list. takes mirrors into account.
7983  */
7984 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7985 {
7986         struct btrfs_block_group_cache *block_group;
7987         u64 free_bytes = 0;
7988         int factor;
7989
7990         list_for_each_entry(block_group, groups_list, list) {
7991                 spin_lock(&block_group->lock);
7992
7993                 if (!block_group->ro) {
7994                         spin_unlock(&block_group->lock);
7995                         continue;
7996                 }
7997
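                /*
                 * DUP, RAID1 and RAID10 keep two copies of every byte,
                 * so each unused logical byte accounts for two raw bytes
                 * on disk.
                 */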
7998                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7999                                           BTRFS_BLOCK_GROUP_RAID10 |
8000                                           BTRFS_BLOCK_GROUP_DUP))
8001                         factor = 2;
8002                 else
8003                         factor = 1;
8004
8005                 free_bytes += (block_group->key.offset -
8006                                btrfs_block_group_used(&block_group->item)) *
8007                                factor;
8008
8009                 spin_unlock(&block_group->lock);
8010         }
8011
8012         return free_bytes;
8013 }
8014
8015 /*
8016  * helper to account for the unused space of all the read-only block groups
8017  * in the space_info.  Takes mirrors into account.
8018  */
8019 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8020 {
8021         int i;
8022         u64 free_bytes = 0;
8023
8024         spin_lock(&sinfo->lock);
8025
8026         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
8027                 if (!list_empty(&sinfo->block_groups[i]))
8028                         free_bytes += __btrfs_get_ro_block_group_free_space(
8029                                                 &sinfo->block_groups[i]);
8030
8031         spin_unlock(&sinfo->lock);
8032
8033         return free_bytes;
8034 }
8035
8036 void btrfs_set_block_group_rw(struct btrfs_root *root,
8037                               struct btrfs_block_group_cache *cache)
8038 {
8039         struct btrfs_space_info *sinfo = cache->space_info;
8040         u64 num_bytes;
8041
8042         BUG_ON(!cache->ro);
8043
8044         spin_lock(&sinfo->lock);
8045         spin_lock(&cache->lock);
8046         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8047                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8048         sinfo->bytes_readonly -= num_bytes;
8049         cache->ro = 0;
8050         spin_unlock(&cache->lock);
8051         spin_unlock(&sinfo->lock);
8052 }
8053
8054 /*
8055  * checks to see if it's even possible to relocate this block group.
8056  *
8057  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
8058  * ok to go ahead and try.
8059  */
8060 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8061 {
8062         struct btrfs_block_group_cache *block_group;
8063         struct btrfs_space_info *space_info;
8064         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8065         struct btrfs_device *device;
8066         struct btrfs_trans_handle *trans;
8067         u64 min_free;
8068         u64 dev_min = 1;
8069         u64 dev_nr = 0;
8070         u64 target;
8071         int index;
8072         int full = 0;
8073         int ret = 0;
8074
8075         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8076
8077         /* odd, couldn't find the block group, leave it alone */
8078         if (!block_group)
8079                 return -1;
8080
8081         min_free = btrfs_block_group_used(&block_group->item);
8082
8083         /* no bytes used, we're good */
8084         if (!min_free)
8085                 goto out;
8086
8087         space_info = block_group->space_info;
8088         spin_lock(&space_info->lock);
8089
8090         full = space_info->full;
8091
8092         /*
8093          * if this is the last block group we have in this space, we can't
8094          * relocate it unless we're able to allocate a new chunk below.
8095          *
8096          * Otherwise, we need to make sure we have room in the space to handle
8097  * all of the extents from this block group.  If we can, we're good.
8098          */
8099         if ((space_info->total_bytes != block_group->key.offset) &&
8100             (space_info->bytes_used + space_info->bytes_reserved +
8101              space_info->bytes_pinned + space_info->bytes_readonly +
8102              min_free < space_info->total_bytes)) {
8103                 spin_unlock(&space_info->lock);
8104                 goto out;
8105         }
8106         spin_unlock(&space_info->lock);
8107
8108         /*
8109          * OK, we don't have enough space, but maybe we have free space on
8110          * our devices to allocate new chunks for relocation, so loop through
8111          * our alloc devices and guess if we have enough space.  If this block
8112          * group is going to be restriped, run checks against the target
8113          * profile instead of the current one.
8114          */
8115         ret = -1;
8116
8117         /*
8118          * index:
8119          *      0: raid10
8120          *      1: raid1
8121          *      2: dup
8122          *      3: raid0
8123          *      4: single
8124          */
8125         target = get_restripe_target(root->fs_info, block_group->flags);
8126         if (target) {
8127                 index = __get_raid_index(extended_to_chunk(target));
8128         } else {
8129                 /*
8130                  * this is just a balance, so if we were marked as full
8131                  * we know there is no space for a new chunk
8132                  */
8133                 if (full)
8134                         goto out;
8135
8136                 index = get_block_group_index(block_group);
8137         }
8138
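        /*
         * Scale the per-device requirement by profile.  Example with
         * min_free = 1GiB of used bytes: RAID10 needs four devices with
         * 512MiB of free dev-extent space each, RAID1 needs two devices
         * with 1GiB each, DUP needs one device with 2GiB, and RAID0
         * spreads the 1GiB evenly over all rw devices.
         */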
8139         if (index == BTRFS_RAID_RAID10) {
8140                 dev_min = 4;
8141                 /* Divide by 2 */
8142                 min_free >>= 1;
8143         } else if (index == BTRFS_RAID_RAID1) {
8144                 dev_min = 2;
8145         } else if (index == BTRFS_RAID_DUP) {
8146                 /* Multiply by 2 */
8147                 min_free <<= 1;
8148         } else if (index == BTRFS_RAID_RAID0) {
8149                 dev_min = fs_devices->rw_devices;
8150                 do_div(min_free, dev_min);
8151         }
8152
8153         /* We need to do this so that we can look at pending chunks */
8154         trans = btrfs_join_transaction(root);
8155         if (IS_ERR(trans)) {
8156                 ret = PTR_ERR(trans);
8157                 goto out;
8158         }
8159
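        /*
         * Count devices that could hold a dev extent of min_free bytes.
         * ret stays 0 only if we break out right after finding the
         * dev_min'th such device; otherwise it is reset to -1.
         */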
8160         mutex_lock(&root->fs_info->chunk_mutex);
8161         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8162                 u64 dev_offset;
8163
8164                 /*
8165                  * check to make sure we can actually find a chunk with enough
8166                  * space to fit our block group in.
8167                  */
8168                 if (device->total_bytes > device->bytes_used + min_free &&
8169                     !device->is_tgtdev_for_dev_replace) {
8170                         ret = find_free_dev_extent(trans, device, min_free,
8171                                                    &dev_offset, NULL);
8172                         if (!ret)
8173                                 dev_nr++;
8174
8175                         if (dev_nr >= dev_min)
8176                                 break;
8177
8178                         ret = -1;
8179                 }
8180         }
8181         mutex_unlock(&root->fs_info->chunk_mutex);
8182         btrfs_end_transaction(trans, root);
8183 out:
8184         btrfs_put_block_group(block_group);
8185         return ret;
8186 }
8187
8188 static int find_first_block_group(struct btrfs_root *root,
8189                 struct btrfs_path *path, struct btrfs_key *key)
8190 {
8191         int ret = 0;
8192         struct btrfs_key found_key;
8193         struct extent_buffer *leaf;
8194         int slot;
8195
8196         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8197         if (ret < 0)
8198                 goto out;
8199
8200         while (1) {
8201                 slot = path->slots[0];
8202                 leaf = path->nodes[0];
8203                 if (slot >= btrfs_header_nritems(leaf)) {
8204                         ret = btrfs_next_leaf(root, path);
8205                         if (ret == 0)
8206                                 continue;
8207                         if (ret < 0)
8208                                 goto out;
8209                         break;
8210                 }
8211                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8212
8213                 if (found_key.objectid >= key->objectid &&
8214                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8215                         ret = 0;
8216                         goto out;
8217                 }
8218                 path->slots[0]++;
8219         }
8220 out:
8221         return ret;
8222 }
8223
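/*
 * Drop the long-lived reference (iref) each block group holds on its free
 * space cache inode, so the inodes can be evicted on unmount.
 */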
8224 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8225 {
8226         struct btrfs_block_group_cache *block_group;
8227         u64 last = 0;
8228
8229         while (1) {
8230                 struct inode *inode;
8231
8232                 block_group = btrfs_lookup_first_block_group(info, last);
8233                 while (block_group) {
8234                         spin_lock(&block_group->lock);
8235                         if (block_group->iref)
8236                                 break;
8237                         spin_unlock(&block_group->lock);
8238                         block_group = next_block_group(info->tree_root,
8239                                                        block_group);
8240                 }
8241                 if (!block_group) {
8242                         if (last == 0)
8243                                 break;
8244                         last = 0;
8245                         continue;
8246                 }
8247
8248                 inode = block_group->inode;
8249                 block_group->iref = 0;
8250                 block_group->inode = NULL;
8251                 spin_unlock(&block_group->lock);
8252                 iput(inode);
8253                 last = block_group->key.objectid + block_group->key.offset;
8254                 btrfs_put_block_group(block_group);
8255         }
8256 }
8257
8258 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8259 {
8260         struct btrfs_block_group_cache *block_group;
8261         struct btrfs_space_info *space_info;
8262         struct btrfs_caching_control *caching_ctl;
8263         struct rb_node *n;
8264
8265         down_write(&info->extent_commit_sem);
8266         while (!list_empty(&info->caching_block_groups)) {
8267                 caching_ctl = list_entry(info->caching_block_groups.next,
8268                                          struct btrfs_caching_control, list);
8269                 list_del(&caching_ctl->list);
8270                 put_caching_control(caching_ctl);
8271         }
8272         up_write(&info->extent_commit_sem);
8273
8274         spin_lock(&info->block_group_cache_lock);
8275         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8276                 block_group = rb_entry(n, struct btrfs_block_group_cache,
8277                                        cache_node);
8278                 rb_erase(&block_group->cache_node,
8279                          &info->block_group_cache_tree);
8280                 spin_unlock(&info->block_group_cache_lock);
8281
8282                 down_write(&block_group->space_info->groups_sem);
8283                 list_del(&block_group->list);
8284                 up_write(&block_group->space_info->groups_sem);
8285
8286                 if (block_group->cached == BTRFS_CACHE_STARTED)
8287                         wait_block_group_cache_done(block_group);
8288
8289                 /*
8290                  * We haven't cached this block group, which means we may
8291                  * still have excluded extents on this block group.
8292                  */
8293                 if (block_group->cached == BTRFS_CACHE_NO ||
8294                     block_group->cached == BTRFS_CACHE_ERROR)
8295                         free_excluded_extents(info->extent_root, block_group);
8296
8297                 btrfs_remove_free_space_cache(block_group);
8298                 btrfs_put_block_group(block_group);
8299
8300                 spin_lock(&info->block_group_cache_lock);
8301         }
8302         spin_unlock(&info->block_group_cache_lock);
8303
8304         /* now that all the block groups are freed, go through and
8305          * free all the space_info structs.  This is only called during
8306          * the final stages of unmount, and so we know nobody is
8307          * using them.  We call synchronize_rcu() once before we start,
8308          * just to be on the safe side.
8309          */
8310         synchronize_rcu();
8311
8312         release_global_block_rsv(info);
8313
8314         while (!list_empty(&info->space_info)) {
8315                 int i;
8316
8317                 space_info = list_entry(info->space_info.next,
8318                                         struct btrfs_space_info,
8319                                         list);
8320                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8321                         if (WARN_ON(space_info->bytes_pinned > 0 ||
8322                             space_info->bytes_reserved > 0 ||
8323                             space_info->bytes_may_use > 0)) {
8324                                 dump_space_info(space_info, 0, 0);
8325                         }
8326                 }
8327                 list_del(&space_info->list);
8328                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
8329                         struct kobject *kobj;
8330                         kobj = &space_info->block_group_kobjs[i];
8331                         if (kobj->parent) {
8332                                 kobject_del(kobj);
8333                                 kobject_put(kobj);
8334                         }
8335                 }
8336                 kobject_del(&space_info->kobj);
8337                 kobject_put(&space_info->kobj);
8338         }
8339         return 0;
8340 }
8341
8342 static void __link_block_group(struct btrfs_space_info *space_info,
8343                                struct btrfs_block_group_cache *cache)
8344 {
8345         int index = get_block_group_index(cache);
8346
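        /*
         * The first block group of each raid index also registers the
         * per-profile kobject under the space_info's kobject; later
         * groups of the same type only join the list.
         */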
8347         down_write(&space_info->groups_sem);
8348         if (list_empty(&space_info->block_groups[index])) {
8349                 struct kobject *kobj = &space_info->block_group_kobjs[index];
8350                 int ret;
8351
8352                 kobject_get(&space_info->kobj); /* put in release */
8353                 ret = kobject_add(kobj, &space_info->kobj, "%s",
8354                                   get_raid_name(index));
8355                 if (ret) {
8356                         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
8357                         kobject_put(&space_info->kobj);
8358                 }
8359         }
8360         list_add_tail(&cache->list, &space_info->block_groups[index]);
8361         up_write(&space_info->groups_sem);
8362 }
8363
8364 static struct btrfs_block_group_cache *
8365 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
8366 {
8367         struct btrfs_block_group_cache *cache;
8368
8369         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8370         if (!cache)
8371                 return NULL;
8372
8373         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8374                                         GFP_NOFS);
8375         if (!cache->free_space_ctl) {
8376                 kfree(cache);
8377                 return NULL;
8378         }
8379
8380         cache->key.objectid = start;
8381         cache->key.offset = size;
8382         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8383
8384         cache->sectorsize = root->sectorsize;
8385         cache->fs_info = root->fs_info;
8386         cache->full_stripe_len = btrfs_full_stripe_len(root,
8387                                                &root->fs_info->mapping_tree,
8388                                                start);
8389         atomic_set(&cache->count, 1);
8390         spin_lock_init(&cache->lock);
8391         INIT_LIST_HEAD(&cache->list);
8392         INIT_LIST_HEAD(&cache->cluster_list);
8393         INIT_LIST_HEAD(&cache->new_bg_list);
8394         btrfs_init_free_space_ctl(cache);
8395
8396         return cache;
8397 }
8398
8399 int btrfs_read_block_groups(struct btrfs_root *root)
8400 {
8401         struct btrfs_path *path;
8402         int ret;
8403         struct btrfs_block_group_cache *cache;
8404         struct btrfs_fs_info *info = root->fs_info;
8405         struct btrfs_space_info *space_info;
8406         struct btrfs_key key;
8407         struct btrfs_key found_key;
8408         struct extent_buffer *leaf;
8409         int need_clear = 0;
8410         u64 cache_gen;
8411
8412         root = info->extent_root;
8413         key.objectid = 0;
8414         key.offset = 0;
8415         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8416         path = btrfs_alloc_path();
8417         if (!path)
8418                 return -ENOMEM;
8419         path->reada = 1;
8420
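        /*
         * If the cache generation in the super block doesn't match the
         * filesystem generation, the free space cache wasn't committed on
         * the last unmount; treat it as stale and rebuild it.  Mounting
         * with -o clear_cache forces the same thing.
         */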
8421         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
8422         if (btrfs_test_opt(root, SPACE_CACHE) &&
8423             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
8424                 need_clear = 1;
8425         if (btrfs_test_opt(root, CLEAR_CACHE))
8426                 need_clear = 1;
8427
8428         while (1) {
8429                 ret = find_first_block_group(root, path, &key);
8430                 if (ret > 0)
8431                         break;
8432                 if (ret != 0)
8433                         goto error;
8434
8435                 leaf = path->nodes[0];
8436                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8437
8438                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
8439                                                        found_key.offset);
8440                 if (!cache) {
8441                         ret = -ENOMEM;
8442                         goto error;
8443                 }
8444
8445                 if (need_clear) {
8446                         /*
8447                          * When we mount with an old space cache, we need to
8448                          * set BTRFS_DC_CLEAR and set the dirty flag.
8449                          *
8450                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
8451                          *    truncate the old free space cache inode and
8452                          *    setup a new one.
8453                          * b) Setting 'dirty flag' makes sure that we flush
8454                          *    the new space cache info onto disk.
8455                          */
8456                         cache->disk_cache_state = BTRFS_DC_CLEAR;
8457                         if (btrfs_test_opt(root, SPACE_CACHE))
8458                                 cache->dirty = 1;
8459                 }
8460
8461                 read_extent_buffer(leaf, &cache->item,
8462                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
8463                                    sizeof(cache->item));
8464                 cache->flags = btrfs_block_group_flags(&cache->item);
8465
8466                 key.objectid = found_key.objectid + found_key.offset;
8467                 btrfs_release_path(path);
8468
8469                 /*
8470                  * We need to exclude the super stripes now so that the space
8471                  * info has super bytes accounted for, otherwise we'll think
8472                  * we have more space than we actually do.
8473                  */
8474                 ret = exclude_super_stripes(root, cache);
8475                 if (ret) {
8476                         /*
8477                          * We may have excluded something, so call this just in
8478                          * case.
8479                          */
8480                         free_excluded_extents(root, cache);
8481                         btrfs_put_block_group(cache);
8482                         goto error;
8483                 }
8484
8485                 /*
8486                  * Check for two cases: either we are full, and therefore
8487                  * don't need to bother with the caching work since we won't
8488                  * find any space, or we are empty, and we can just add all
8489                  * the space in and be done with it.  This saves us a lot of
8490                  * time, particularly in the full case.
8491                  */
8492                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8493                         cache->last_byte_to_unpin = (u64)-1;
8494                         cache->cached = BTRFS_CACHE_FINISHED;
8495                         free_excluded_extents(root, cache);
8496                 } else if (btrfs_block_group_used(&cache->item) == 0) {
8497                         cache->last_byte_to_unpin = (u64)-1;
8498                         cache->cached = BTRFS_CACHE_FINISHED;
8499                         add_new_free_space(cache, root->fs_info,
8500                                            found_key.objectid,
8501                                            found_key.objectid +
8502                                            found_key.offset);
8503                         free_excluded_extents(root, cache);
8504                 }
8505
8506                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8507                 if (ret) {
8508                         btrfs_remove_free_space_cache(cache);
8509                         btrfs_put_block_group(cache);
8510                         goto error;
8511                 }
8512
8513                 ret = update_space_info(info, cache->flags, found_key.offset,
8514                                         btrfs_block_group_used(&cache->item),
8515                                         &space_info);
8516                 if (ret) {
8517                         btrfs_remove_free_space_cache(cache);
8518                         spin_lock(&info->block_group_cache_lock);
8519                         rb_erase(&cache->cache_node,
8520                                  &info->block_group_cache_tree);
8521                         spin_unlock(&info->block_group_cache_lock);
8522                         btrfs_put_block_group(cache);
8523                         goto error;
8524                 }
8525
8526                 cache->space_info = space_info;
8527                 spin_lock(&cache->space_info->lock);
8528                 cache->space_info->bytes_readonly += cache->bytes_super;
8529                 spin_unlock(&cache->space_info->lock);
8530
8531                 __link_block_group(space_info, cache);
8532
8533                 set_avail_alloc_bits(root->fs_info, cache->flags);
8534                 if (btrfs_chunk_readonly(root, cache->key.objectid))
8535                         set_block_group_ro(cache, 1);
8536         }
8537
8538         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8539                 if (!(get_alloc_profile(root, space_info->flags) &
8540                       (BTRFS_BLOCK_GROUP_RAID10 |
8541                        BTRFS_BLOCK_GROUP_RAID1 |
8542                        BTRFS_BLOCK_GROUP_RAID5 |
8543                        BTRFS_BLOCK_GROUP_RAID6 |
8544                        BTRFS_BLOCK_GROUP_DUP)))
8545                         continue;
8546                 /*
8547                  * avoid allocating from un-mirrored block groups if there are
8548                  * mirrored block groups.
8549                  */
8550                 list_for_each_entry(cache,
8551                                 &space_info->block_groups[BTRFS_RAID_RAID0],
8552                                 list)
8553                         set_block_group_ro(cache, 1);
8554                 list_for_each_entry(cache,
8555                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
8556                                 list)
8557                         set_block_group_ro(cache, 1);
8558         }
8559
8560         init_global_block_rsv(info);
8561         ret = 0;
8562 error:
8563         btrfs_free_path(path);
8564         return ret;
8565 }
8566
8567 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
8568                                        struct btrfs_root *root)
8569 {
8570         struct btrfs_block_group_cache *block_group, *tmp;
8571         struct btrfs_root *extent_root = root->fs_info->extent_root;
8572         struct btrfs_block_group_item item;
8573         struct btrfs_key key;
8574         int ret = 0;
8575
8576         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
8577                                  new_bg_list) {
8578                 list_del_init(&block_group->new_bg_list);
8579
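                /*
                 * If a previous insertion failed and aborted the
                 * transaction, keep draining the list but don't insert
                 * any more items.
                 */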
8580                 if (ret)
8581                         continue;
8582
8583                 spin_lock(&block_group->lock);
8584                 memcpy(&item, &block_group->item, sizeof(item));
8585                 memcpy(&key, &block_group->key, sizeof(key));
8586                 spin_unlock(&block_group->lock);
8587
8588                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
8589                                         sizeof(item));
8590                 if (ret)
8591                         btrfs_abort_transaction(trans, extent_root, ret);
8592                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
8593                                                key.objectid, key.offset);
8594                 if (ret)
8595                         btrfs_abort_transaction(trans, extent_root, ret);
8596         }
8597 }
8598
8599 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8600                            struct btrfs_root *root, u64 bytes_used,
8601                            u64 type, u64 chunk_objectid, u64 chunk_offset,
8602                            u64 size)
8603 {
8604         int ret;
8605         struct btrfs_root *extent_root;
8606         struct btrfs_block_group_cache *cache;
8607
8608         extent_root = root->fs_info->extent_root;
8609
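        /*
         * New block groups can't be replayed from the tree log, so make
         * any log sync in this transaction fall back to a full commit.
         */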
8610         root->fs_info->last_trans_log_full_commit = trans->transid;
8611
8612         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
8613         if (!cache)
8614                 return -ENOMEM;
8615
8616         btrfs_set_block_group_used(&cache->item, bytes_used);
8617         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8618         btrfs_set_block_group_flags(&cache->item, type);
8619
8620         cache->flags = type;
8621         cache->last_byte_to_unpin = (u64)-1;
8622         cache->cached = BTRFS_CACHE_FINISHED;
8623         ret = exclude_super_stripes(root, cache);
8624         if (ret) {
8625                 /*
8626                  * We may have excluded something, so call this just in
8627                  * case.
8628                  */
8629                 free_excluded_extents(root, cache);
8630                 btrfs_put_block_group(cache);
8631                 return ret;
8632         }
8633
8634         add_new_free_space(cache, root->fs_info, chunk_offset,
8635                            chunk_offset + size);
8636
8637         free_excluded_extents(root, cache);
8638
8639         ret = btrfs_add_block_group_cache(root->fs_info, cache);
8640         if (ret) {
8641                 btrfs_remove_free_space_cache(cache);
8642                 btrfs_put_block_group(cache);
8643                 return ret;
8644         }
8645
8646         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8647                                 &cache->space_info);
8648         if (ret) {
8649                 btrfs_remove_free_space_cache(cache);
8650                 spin_lock(&root->fs_info->block_group_cache_lock);
8651                 rb_erase(&cache->cache_node,
8652                          &root->fs_info->block_group_cache_tree);
8653                 spin_unlock(&root->fs_info->block_group_cache_lock);
8654                 btrfs_put_block_group(cache);
8655                 return ret;
8656         }
8657         update_global_block_rsv(root->fs_info);
8658
8659         spin_lock(&cache->space_info->lock);
8660         cache->space_info->bytes_readonly += cache->bytes_super;
8661         spin_unlock(&cache->space_info->lock);
8662
8663         __link_block_group(cache->space_info, cache);
8664
8665         list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8666
8667         set_avail_alloc_bits(extent_root->fs_info, type);
8668
8669         return 0;
8670 }
8671
8672 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8673 {
8674         u64 extra_flags = chunk_to_extended(flags) &
8675                                 BTRFS_EXTENDED_PROFILE_MASK;
8676
8677         write_seqlock(&fs_info->profiles_lock);
8678         if (flags & BTRFS_BLOCK_GROUP_DATA)
8679                 fs_info->avail_data_alloc_bits &= ~extra_flags;
8680         if (flags & BTRFS_BLOCK_GROUP_METADATA)
8681                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8682         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8683                 fs_info->avail_system_alloc_bits &= ~extra_flags;
8684         write_sequnlock(&fs_info->profiles_lock);
8685 }
8686
8687 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8688                              struct btrfs_root *root, u64 group_start)
8689 {
8690         struct btrfs_path *path;
8691         struct btrfs_block_group_cache *block_group;
8692         struct btrfs_free_cluster *cluster;
8693         struct btrfs_root *tree_root = root->fs_info->tree_root;
8694         struct btrfs_key key;
8695         struct inode *inode;
8696         int ret;
8697         int index;
8698         int factor;
8699
8700         root = root->fs_info->extent_root;
8701
8702         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8703         BUG_ON(!block_group);
8704         BUG_ON(!block_group->ro);
8705
8706         /*
8707          * Free the reserved super bytes from this block group before
8708          * removing it.
8709          */
8710         free_excluded_extents(root, block_group);
8711
8712         memcpy(&key, &block_group->key, sizeof(key));
8713         index = get_block_group_index(block_group);
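        /* mirrored profiles consume twice the raw disk space */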
8714         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8715                                   BTRFS_BLOCK_GROUP_RAID1 |
8716                                   BTRFS_BLOCK_GROUP_RAID10))
8717                 factor = 2;
8718         else
8719                 factor = 1;
8720
8721         /* make sure this block group isn't part of an allocation cluster */
8722         cluster = &root->fs_info->data_alloc_cluster;
8723         spin_lock(&cluster->refill_lock);
8724         btrfs_return_cluster_to_free_space(block_group, cluster);
8725         spin_unlock(&cluster->refill_lock);
8726
8727         /*
8728          * make sure this block group isn't part of a metadata
8729          * allocation cluster
8730          */
8731         cluster = &root->fs_info->meta_alloc_cluster;
8732         spin_lock(&cluster->refill_lock);
8733         btrfs_return_cluster_to_free_space(block_group, cluster);
8734         spin_unlock(&cluster->refill_lock);
8735
8736         path = btrfs_alloc_path();
8737         if (!path) {
8738                 ret = -ENOMEM;
8739                 goto out;
8740         }
8741
8742         inode = lookup_free_space_inode(tree_root, block_group, path);
8743         if (!IS_ERR(inode)) {
8744                 ret = btrfs_orphan_add(trans, inode);
8745                 if (ret) {
8746                         btrfs_add_delayed_iput(inode);
8747                         goto out;
8748                 }
8749                 clear_nlink(inode);
8750                 /* One for the block group's ref */
8751                 spin_lock(&block_group->lock);
8752                 if (block_group->iref) {
8753                         block_group->iref = 0;
8754                         block_group->inode = NULL;
8755                         spin_unlock(&block_group->lock);
8756                         iput(inode);
8757                 } else {
8758                         spin_unlock(&block_group->lock);
8759                 }
8760                 /* One for our lookup ref */
8761                 btrfs_add_delayed_iput(inode);
8762         }
8763
8764         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8765         key.offset = block_group->key.objectid;
8766         key.type = 0;
8767
8768         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8769         if (ret < 0)
8770                 goto out;
8771         if (ret > 0)
8772                 btrfs_release_path(path);
8773         if (ret == 0) {
8774                 ret = btrfs_del_item(trans, tree_root, path);
8775                 if (ret)
8776                         goto out;
8777                 btrfs_release_path(path);
8778         }
8779
8780         spin_lock(&root->fs_info->block_group_cache_lock);
8781         rb_erase(&block_group->cache_node,
8782                  &root->fs_info->block_group_cache_tree);
8783
8784         if (root->fs_info->first_logical_byte == block_group->key.objectid)
8785                 root->fs_info->first_logical_byte = (u64)-1;
8786         spin_unlock(&root->fs_info->block_group_cache_lock);
8787
8788         down_write(&block_group->space_info->groups_sem);
8789         /*
8790          * we must use list_del_init so people can check to see if they
8791          * are still on the list after taking the semaphore
8792          */
8793         list_del_init(&block_group->list);
8794         if (list_empty(&block_group->space_info->block_groups[index])) {
8795                 kobject_del(&block_group->space_info->block_group_kobjs[index]);
8796                 kobject_put(&block_group->space_info->block_group_kobjs[index]);
8797                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8798         }
8799         up_write(&block_group->space_info->groups_sem);
8800
8801         if (block_group->cached == BTRFS_CACHE_STARTED)
8802                 wait_block_group_cache_done(block_group);
8803
8804         btrfs_remove_free_space_cache(block_group);
8805
8806         spin_lock(&block_group->space_info->lock);
8807         block_group->space_info->total_bytes -= block_group->key.offset;
8808         block_group->space_info->bytes_readonly -= block_group->key.offset;
8809         block_group->space_info->disk_total -= block_group->key.offset * factor;
8810         spin_unlock(&block_group->space_info->lock);
8811
8812         memcpy(&key, &block_group->key, sizeof(key));
8813
8814         btrfs_clear_space_info_full(root->fs_info);
8815
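        /*
         * Put the block group twice: once for the lookup reference taken
         * above and once for the reference the block group cache rbtree
         * held until rb_erase() above.
         */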
8816         btrfs_put_block_group(block_group);
8817         btrfs_put_block_group(block_group);
8818
8819         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8820         if (ret > 0)
8821                 ret = -EIO;
8822         if (ret < 0)
8823                 goto out;
8824
8825         ret = btrfs_del_item(trans, root, path);
8826 out:
8827         btrfs_free_path(path);
8828         return ret;
8829 }
8830
8831 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8832 {
8833         struct btrfs_space_info *space_info;
8834         struct btrfs_super_block *disk_super;
8835         u64 features;
8836         u64 flags;
8837         int mixed = 0;
8838         int ret;
8839
8840         disk_super = fs_info->super_copy;
8841         if (!btrfs_super_root(disk_super))
8842                 return 1;
8843
8844         features = btrfs_super_incompat_flags(disk_super);
8845         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8846                 mixed = 1;
8847
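        /*
         * Pre-create the space_infos we know we need: SYSTEM always,
         * then either one combined METADATA|DATA space_info for mixed
         * block groups or separate METADATA and DATA ones.
         */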
8848         flags = BTRFS_BLOCK_GROUP_SYSTEM;
8849         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8850         if (ret)
8851                 goto out;
8852
8853         if (mixed) {
8854                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8855                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8856         } else {
8857                 flags = BTRFS_BLOCK_GROUP_METADATA;
8858                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8859                 if (ret)
8860                         goto out;
8861
8862                 flags = BTRFS_BLOCK_GROUP_DATA;
8863                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8864         }
8865 out:
8866         return ret;
8867 }
8868
8869 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8870 {
8871         return unpin_extent_range(root, start, end);
8872 }
8873
8874 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8875                                u64 num_bytes, u64 *actual_bytes)
8876 {
8877         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8878 }
8879
8880 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8881 {
8882         struct btrfs_fs_info *fs_info = root->fs_info;
8883         struct btrfs_block_group_cache *cache = NULL;
8884         u64 group_trimmed;
8885         u64 start;
8886         u64 end;
8887         u64 trimmed = 0;
8888         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8889         int ret = 0;
8890
8891         /*
8892          * Try to trim all FS space; the first block group may not start at zero.
8893          */
8894         if (range->len == total_bytes)
8895                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8896         else
8897                 cache = btrfs_lookup_block_group(fs_info, range->start);
8898
8899         while (cache) {
8900                 if (cache->key.objectid >= (range->start + range->len)) {
8901                         btrfs_put_block_group(cache);
8902                         break;
8903                 }
8904
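                /* only trim the part of the range inside this group */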
8905                 start = max(range->start, cache->key.objectid);
8906                 end = min(range->start + range->len,
8907                                 cache->key.objectid + cache->key.offset);
8908
8909                 if (end - start >= range->minlen) {
8910                         if (!block_group_cache_done(cache)) {
8911                                 ret = cache_block_group(cache, 0);
8912                                 if (ret) {
8913                                         btrfs_put_block_group(cache);
8914                                         break;
8915                                 }
8916                                 ret = wait_block_group_cache_done(cache);
8917                                 if (ret) {
8918                                         btrfs_put_block_group(cache);
8919                                         break;
8920                                 }
8921                         }
8922                         ret = btrfs_trim_block_group(cache,
8923                                                      &group_trimmed,
8924                                                      start,
8925                                                      end,
8926                                                      range->minlen);
8927
8928                         trimmed += group_trimmed;
8929                         if (ret) {
8930                                 btrfs_put_block_group(cache);
8931                                 break;
8932                         }
8933                 }
8934
8935                 cache = next_block_group(fs_info->tree_root, cache);
8936         }
8937
8938         range->len = trimmed;
8939         return ret;
8940 }