/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try to allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};
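
/*
 * Illustrative sketch (assumed usage, not a verbatim call site): a
 * caller that only wants a new chunk when space is genuinely short
 * passes CHUNK_ALLOC_NO_FORCE, while the cluster setup path would use
 * CHUNK_ALLOC_LIMITED to keep a small pool of chunks available:
 *
 *      ret = do_chunk_alloc(trans, extent_root, flags,
 *                           CHUNK_ALLOC_NO_FORCE);
 */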

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};
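
/*
 * Hedged example of how these flags pair up (assumed usage, not a
 * verbatim call site): an allocation whose ENOSPC accounting is
 * handled elsewhere reserves with RESERVE_ALLOC_NO_ACCOUNT and later
 * releases with RESERVE_FREE:
 *
 *      btrfs_update_reserved_bytes(cache, num_bytes,
 *                                  RESERVE_ALLOC_NO_ACCOUNT);
 *      ...
 *      btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 */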

static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}
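
/*
 * The get/put pair above is a plain atomic refcount: every successful
 * lookup that returns a block group holds a reference, and the final
 * btrfs_put_block_group() frees the cache.  A minimal sketch of the
 * expected calling pattern (illustrative, not a verbatim call site):
 *
 *      cache = btrfs_lookup_block_group(fs_info, bytenr);
 *      if (cache) {
 *              ... use cache ...
 *              btrfs_put_block_group(cache);
 *      }
 */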

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}
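
/*
 * Worked example for the search above: with 1G block groups starting
 * at 0, 1G and 2G, looking up bytenr 1.5G with contains == 1 returns
 * the group at 1G, since 1G <= 1.5G <= 2G - 1; with contains == 0 the
 * same lookup returns the group at 2G, the first group starting at or
 * after bytenr.  Either way the returned group carries a reference
 * that the caller must drop with btrfs_put_block_group().
 */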

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                BUG_ON(ret); /* -ENOMEM */
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret); /* -ENOMEM */

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        BUG_ON(ret); /* -ENOMEM */
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}
/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that
 * can't be used yet, as their free space will be released as soon as
 * the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}
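
/*
 * Worked example: caching the range [0, 100) while bytes [40, 60] are
 * still pinned adds [0, 40) to the free space cache, skips the pinned
 * bytes, resumes at 61 and adds [61, 100), returning total_added == 79.
 * The pinned middle becomes free space only once the transaction that
 * pinned it commits.
 */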

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            btrfs_next_leaf(extent_root, path)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        caching_ctl->work.func = caching_thread;

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it could happen in the
         * case where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this block
         * group, but if we've moved to the state where we will wait on caching
         * block groups we need to first check if we're doing a fast load here,
         * so we can wait for it to finish, otherwise we could end up allocating
         * from a block group whose cache gets evicted for one reason or
         * another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->extent_commit_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

        return ret;
}
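
/*
 * Usage note (a sketch of the assumed contract, not a verbatim call
 * site): passing load_cache_only == 1 asks only for the fast load from
 * the on-disk free space cache and returns 0 whether or not that load
 * succeeded; passing 0 additionally takes a block group reference and
 * queues caching_thread() on fs_info->caching_workers so the slow
 * extent tree scan can fill in whatever the fast path could not:
 *
 *      ret = cache_block_group(cache, 0);
 */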

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}
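
/*
 * btrfs_lookup_extent() reuses btrfs_search_slot()'s return convention:
 * 0 means an extent item keyed exactly (start, EXTENT_ITEM, len)
 * exists, 1 means it does not, and a negative value is an error.  A
 * hedged example (the byte values are made up for illustration):
 *
 *      ret = btrfs_lookup_extent(root, 12288, 4096);
 *      if (ret < 0)
 *              return ret;
 *      found = (ret == 0);
 */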

/*
 * helper function to look up the reference count and flags of an extent.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be once all of
 * the delayed refs have been processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;
        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}
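
/*
 * Worked example of the accounting above: if the on-disk extent item
 * records 3 references and the delayed ref head for that bytenr
 * carries a pending ref_mod of +2, *refs comes back as 5, the count
 * the extent will have once the queued delayed refs are run.
 */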

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Full back refs are actually generic, and can
 * be used in all the cases where implicit back refs are used. The major
 * shortcoming of full back refs is their overhead. Every time a tree
 * block gets COWed, we have to update the back refs entry for every
 * pointer in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, and increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, and increase lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key. The key offset for the implicit back refs is the
 * objectid of the block's owner tree. The key offset for the full back
 * refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in the
 * tree block info structure.
 */
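
/*
 * A concrete (illustrative) key composition following the rules above:
 * a data extent starting at byte 131072, referenced by inode 257 at
 * file offset 0 in the subvolume tree (objectid 5), gets an implicit
 * back ref item keyed as
 *
 *      (131072, BTRFS_EXTENT_DATA_REF_KEY,
 *       hash_extent_data_ref(5, 257, 0))
 *
 * while a shared back ref for the same extent is keyed by the bytenr
 * of the parent block instead of the hash.  The numbers here are made
 * up for illustration.
 */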

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(trans, root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}
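
/*
 * The hash above feeds two independent crc32c streams, root_objectid
 * into the high word and owner plus offset into the low word, then
 * combines them with a 31-bit (not 32-bit) shift.  The odd shift is
 * part of the on-disk format, so it cannot be "fixed".  Composition,
 * spelled out:
 *
 *      high = crc32c(~0, root_objectid)
 *      low  = crc32c(crc32c(~0, owner), offset)
 *      hash = ((u64)high << 31) ^ (u64)low
 */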

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}
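
/*
 * Collision note for the insert above: the key offset is only a hash,
 * so two different (root, owner, offset) triples can collide.  The
 * -EEXIST loop handles this by linearly probing key.offset++ until it
 * either finds the matching ref (and bumps its count) or an empty
 * slot to insert into.  For example, if hash(5, 257, 0) happened to
 * equal hash(5, 258, 0), the second ref would simply be stored at
 * hash + 1 (the colliding values are made up for illustration).
 */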
1221
1222 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1223                                            struct btrfs_root *root,
1224                                            struct btrfs_path *path,
1225                                            int refs_to_drop)
1226 {
1227         struct btrfs_key key;
1228         struct btrfs_extent_data_ref *ref1 = NULL;
1229         struct btrfs_shared_data_ref *ref2 = NULL;
1230         struct extent_buffer *leaf;
1231         u32 num_refs = 0;
1232         int ret = 0;
1233
1234         leaf = path->nodes[0];
1235         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1236
1237         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1238                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1239                                       struct btrfs_extent_data_ref);
1240                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1241         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1242                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1243                                       struct btrfs_shared_data_ref);
1244                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1245 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1246         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1247                 struct btrfs_extent_ref_v0 *ref0;
1248                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1249                                       struct btrfs_extent_ref_v0);
1250                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1251 #endif
1252         } else {
1253                 BUG();
1254         }
1255
1256         BUG_ON(num_refs < refs_to_drop);
1257         num_refs -= refs_to_drop;
1258
1259         if (num_refs == 0) {
1260                 ret = btrfs_del_item(trans, root, path);
1261         } else {
1262                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1263                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1264                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1265                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1266 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1267                 else {
1268                         struct btrfs_extent_ref_v0 *ref0;
1269                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1270                                         struct btrfs_extent_ref_v0);
1271                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1272                 }
1273 #endif
1274                 btrfs_mark_buffer_dirty(leaf);
1275         }
1276         return ret;
1277 }
1278
1279 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1280                                           struct btrfs_path *path,
1281                                           struct btrfs_extent_inline_ref *iref)
1282 {
1283         struct btrfs_key key;
1284         struct extent_buffer *leaf;
1285         struct btrfs_extent_data_ref *ref1;
1286         struct btrfs_shared_data_ref *ref2;
1287         u32 num_refs = 0;
1288
1289         leaf = path->nodes[0];
1290         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1291         if (iref) {
1292                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1293                     BTRFS_EXTENT_DATA_REF_KEY) {
1294                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1295                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1296                 } else {
1297                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1298                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1299                 }
1300         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1301                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1302                                       struct btrfs_extent_data_ref);
1303                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1304         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1305                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1306                                       struct btrfs_shared_data_ref);
1307                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1308 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1309         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1310                 struct btrfs_extent_ref_v0 *ref0;
1311                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1312                                       struct btrfs_extent_ref_v0);
1313                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1314 #endif
1315         } else {
1316                 WARN_ON(1);
1317         }
1318         return num_refs;
1319 }
1320
1321 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1322                                           struct btrfs_root *root,
1323                                           struct btrfs_path *path,
1324                                           u64 bytenr, u64 parent,
1325                                           u64 root_objectid)
1326 {
1327         struct btrfs_key key;
1328         int ret;
1329
1330         key.objectid = bytenr;
1331         if (parent) {
1332                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1333                 key.offset = parent;
1334         } else {
1335                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1336                 key.offset = root_objectid;
1337         }
1338
1339         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1340         if (ret > 0)
1341                 ret = -ENOENT;
1342 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1343         if (ret == -ENOENT && parent) {
1344                 btrfs_release_path(path);
1345                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1346                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1347                 if (ret > 0)
1348                         ret = -ENOENT;
1349         }
1350 #endif
1351         return ret;
1352 }
1353
1354 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1355                                           struct btrfs_root *root,
1356                                           struct btrfs_path *path,
1357                                           u64 bytenr, u64 parent,
1358                                           u64 root_objectid)
1359 {
1360         struct btrfs_key key;
1361         int ret;
1362
1363         key.objectid = bytenr;
1364         if (parent) {
1365                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1366                 key.offset = parent;
1367         } else {
1368                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1369                 key.offset = root_objectid;
1370         }
1371
1372         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1373         btrfs_release_path(path);
1374         return ret;
1375 }
1376
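/*
 * map (parent, owner) to a back ref key type:
 *
 *   owner < BTRFS_FIRST_FREE_OBJECTID, parent set    -> SHARED_BLOCK_REF
 *   owner < BTRFS_FIRST_FREE_OBJECTID, no parent     -> TREE_BLOCK_REF
 *   owner >= BTRFS_FIRST_FREE_OBJECTID, parent set   -> SHARED_DATA_REF
 *   owner >= BTRFS_FIRST_FREE_OBJECTID, no parent    -> EXTENT_DATA_REF
 */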
1377 static inline int extent_ref_type(u64 parent, u64 owner)
1378 {
1379         int type;
1380         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1381                 if (parent > 0)
1382                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1383                 else
1384                         type = BTRFS_TREE_BLOCK_REF_KEY;
1385         } else {
1386                 if (parent > 0)
1387                         type = BTRFS_SHARED_DATA_REF_KEY;
1388                 else
1389                         type = BTRFS_EXTENT_DATA_REF_KEY;
1390         }
1391         return type;
1392 }
1393
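/*
 * walk up the path and return the first key that follows the current
 * slot at any level.  Returns 0 and fills @key on success, or 1 if
 * the path already points at the last key.
 */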
1394 static int find_next_key(struct btrfs_path *path, int level,
1395                          struct btrfs_key *key)
1397 {
1398         for (; level < BTRFS_MAX_LEVEL; level++) {
1399                 if (!path->nodes[level])
1400                         break;
1401                 if (path->slots[level] + 1 >=
1402                     btrfs_header_nritems(path->nodes[level]))
1403                         continue;
1404                 if (level == 0)
1405                         btrfs_item_key_to_cpu(path->nodes[level], key,
1406                                               path->slots[level] + 1);
1407                 else
1408                         btrfs_node_key_to_cpu(path->nodes[level], key,
1409                                               path->slots[level] + 1);
1410                 return 0;
1411         }
1412         return 1;
1413 }
1414
1415 /*
1416  * look for an inline back ref.  If the back ref is found, *ref_ret is
1417  * set to the address of the inline back ref and 0 is returned.
1418  *
1419  * If the back ref isn't found, *ref_ret is set to the address where it
1420  * should be inserted and -ENOENT is returned.
1421  *
1422  * If insert is true and there are too many inline back refs, the path
1423  * points to the extent item and -EAGAIN is returned.
1424  *
1425  * NOTE: inline back refs are ordered in the same way that back ref
1426  *       items in the tree are ordered.
1427  */
1428 static noinline_for_stack
1429 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1430                                  struct btrfs_root *root,
1431                                  struct btrfs_path *path,
1432                                  struct btrfs_extent_inline_ref **ref_ret,
1433                                  u64 bytenr, u64 num_bytes,
1434                                  u64 parent, u64 root_objectid,
1435                                  u64 owner, u64 offset, int insert)
1436 {
1437         struct btrfs_key key;
1438         struct extent_buffer *leaf;
1439         struct btrfs_extent_item *ei;
1440         struct btrfs_extent_inline_ref *iref;
1441         u64 flags;
1442         u64 item_size;
1443         unsigned long ptr;
1444         unsigned long end;
1445         int extra_size;
1446         int type;
1447         int want;
1448         int ret;
1449         int err = 0;
1450
1451         key.objectid = bytenr;
1452         key.type = BTRFS_EXTENT_ITEM_KEY;
1453         key.offset = num_bytes;
1454
1455         want = extent_ref_type(parent, owner);
1456         if (insert) {
1457                 extra_size = btrfs_extent_inline_ref_size(want);
1458                 path->keep_locks = 1;
1459         } else
1460                 extra_size = -1;
1461         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1462         if (ret < 0) {
1463                 err = ret;
1464                 goto out;
1465         }
1466         if (ret && !insert) {
1467                 err = -ENOENT;
1468                 goto out;
1469         }
1470         BUG_ON(ret); /* Corruption */
1471
1472         leaf = path->nodes[0];
1473         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1474 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1475         if (item_size < sizeof(*ei)) {
1476                 if (!insert) {
1477                         err = -ENOENT;
1478                         goto out;
1479                 }
1480                 ret = convert_extent_item_v0(trans, root, path, owner,
1481                                              extra_size);
1482                 if (ret < 0) {
1483                         err = ret;
1484                         goto out;
1485                 }
1486                 leaf = path->nodes[0];
1487                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1488         }
1489 #endif
1490         BUG_ON(item_size < sizeof(*ei));
1491
1492         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1493         flags = btrfs_extent_flags(leaf, ei);
1494
1495         ptr = (unsigned long)(ei + 1);
1496         end = (unsigned long)ei + item_size;
1497
1498         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1499                 ptr += sizeof(struct btrfs_tree_block_info);
1500                 BUG_ON(ptr > end);
1501         } else {
1502                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1503         }
1504
1505         err = -ENOENT;
1506         while (1) {
1507                 if (ptr >= end) {
1508                         WARN_ON(ptr > end);
1509                         break;
1510                 }
1511                 iref = (struct btrfs_extent_inline_ref *)ptr;
1512                 type = btrfs_extent_inline_ref_type(leaf, iref);
1513                 if (want < type)
1514                         break;
1515                 if (want > type) {
1516                         ptr += btrfs_extent_inline_ref_size(type);
1517                         continue;
1518                 }
1519
1520                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1521                         struct btrfs_extent_data_ref *dref;
1522                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1523                         if (match_extent_data_ref(leaf, dref, root_objectid,
1524                                                   owner, offset)) {
1525                                 err = 0;
1526                                 break;
1527                         }
1528                         if (hash_extent_data_ref_item(leaf, dref) <
1529                             hash_extent_data_ref(root_objectid, owner, offset))
1530                                 break;
1531                 } else {
1532                         u64 ref_offset;
1533                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1534                         if (parent > 0) {
1535                                 if (parent == ref_offset) {
1536                                         err = 0;
1537                                         break;
1538                                 }
1539                                 if (ref_offset < parent)
1540                                         break;
1541                         } else {
1542                                 if (root_objectid == ref_offset) {
1543                                         err = 0;
1544                                         break;
1545                                 }
1546                                 if (ref_offset < root_objectid)
1547                                         break;
1548                         }
1549                 }
1550                 ptr += btrfs_extent_inline_ref_size(type);
1551         }
1552         if (err == -ENOENT && insert) {
1553                 if (item_size + extra_size >=
1554                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1555                         err = -EAGAIN;
1556                         goto out;
1557                 }
1558                 /*
1559                  * To add new inline back ref, we have to make sure
1560                  * there is no corresponding back ref item.
1561                  * For simplicity, we just do not add new inline back
1562                  * ref if there is any kind of item for this block
1563                  */
1564                 if (find_next_key(path, 0, &key) == 0 &&
1565                     key.objectid == bytenr &&
1566                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1567                         err = -EAGAIN;
1568                         goto out;
1569                 }
1570         }
1571         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1572 out:
1573         if (insert) {
1574                 path->keep_locks = 0;
1575                 btrfs_unlock_up_safe(path, 1);
1576         }
1577         return err;
1578 }
1579
1580 /*
1581  * helper to add a new inline back ref
1582  */
1583 static noinline_for_stack
1584 void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1585                                  struct btrfs_root *root,
1586                                  struct btrfs_path *path,
1587                                  struct btrfs_extent_inline_ref *iref,
1588                                  u64 parent, u64 root_objectid,
1589                                  u64 owner, u64 offset, int refs_to_add,
1590                                  struct btrfs_delayed_extent_op *extent_op)
1591 {
1592         struct extent_buffer *leaf;
1593         struct btrfs_extent_item *ei;
1594         unsigned long ptr;
1595         unsigned long end;
1596         unsigned long item_offset;
1597         u64 refs;
1598         int size;
1599         int type;
1600
1601         leaf = path->nodes[0];
1602         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1603         item_offset = (unsigned long)iref - (unsigned long)ei;
1604
1605         type = extent_ref_type(parent, owner);
1606         size = btrfs_extent_inline_ref_size(type);
1607
1608         btrfs_extend_item(trans, root, path, size);
1609
1610         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1611         refs = btrfs_extent_refs(leaf, ei);
1612         refs += refs_to_add;
1613         btrfs_set_extent_refs(leaf, ei, refs);
1614         if (extent_op)
1615                 __run_delayed_extent_op(extent_op, leaf, ei);
1616
1617         ptr = (unsigned long)ei + item_offset;
1618         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1619         if (ptr < end - size)
1620                 memmove_extent_buffer(leaf, ptr + size, ptr,
1621                                       end - size - ptr);
1622
1623         iref = (struct btrfs_extent_inline_ref *)ptr;
1624         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1625         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1626                 struct btrfs_extent_data_ref *dref;
1627                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1628                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1629                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1630                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1631                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1632         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1633                 struct btrfs_shared_data_ref *sref;
1634                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1635                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1636                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1637         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1638                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1639         } else {
1640                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1641         }
1642         btrfs_mark_buffer_dirty(leaf);
1643 }
1644
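/*
 * look for a back ref of the given type, first in the inline area of
 * the extent item and then, if it isn't inline, among the standalone
 * back ref items.
 */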
1645 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1646                                  struct btrfs_root *root,
1647                                  struct btrfs_path *path,
1648                                  struct btrfs_extent_inline_ref **ref_ret,
1649                                  u64 bytenr, u64 num_bytes, u64 parent,
1650                                  u64 root_objectid, u64 owner, u64 offset)
1651 {
1652         int ret;
1653
1654         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1655                                            bytenr, num_bytes, parent,
1656                                            root_objectid, owner, offset, 0);
1657         if (ret != -ENOENT)
1658                 return ret;
1659
1660         btrfs_release_path(path);
1661         *ref_ret = NULL;
1662
1663         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1664                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1665                                             root_objectid);
1666         } else {
1667                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1668                                              root_objectid, owner, offset);
1669         }
1670         return ret;
1671 }
1672
1673 /*
1674  * helper to update/remove an inline back ref
1675  */
1676 static noinline_for_stack
1677 void update_inline_extent_backref(struct btrfs_trans_handle *trans,
1678                                   struct btrfs_root *root,
1679                                   struct btrfs_path *path,
1680                                   struct btrfs_extent_inline_ref *iref,
1681                                   int refs_to_mod,
1682                                   struct btrfs_delayed_extent_op *extent_op)
1683 {
1684         struct extent_buffer *leaf;
1685         struct btrfs_extent_item *ei;
1686         struct btrfs_extent_data_ref *dref = NULL;
1687         struct btrfs_shared_data_ref *sref = NULL;
1688         unsigned long ptr;
1689         unsigned long end;
1690         u32 item_size;
1691         int size;
1692         int type;
1693         u64 refs;
1694
1695         leaf = path->nodes[0];
1696         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1697         refs = btrfs_extent_refs(leaf, ei);
1698         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1699         refs += refs_to_mod;
1700         btrfs_set_extent_refs(leaf, ei, refs);
1701         if (extent_op)
1702                 __run_delayed_extent_op(extent_op, leaf, ei);
1703
1704         type = btrfs_extent_inline_ref_type(leaf, iref);
1705
1706         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1707                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1708                 refs = btrfs_extent_data_ref_count(leaf, dref);
1709         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1710                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1711                 refs = btrfs_shared_data_ref_count(leaf, sref);
1712         } else {
1713                 refs = 1;
1714                 BUG_ON(refs_to_mod != -1);
1715         }
1716
1717         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1718         refs += refs_to_mod;
1719
1720         if (refs > 0) {
1721                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1722                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1723                 else
1724                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1725         } else {
1726                 size = btrfs_extent_inline_ref_size(type);
1727                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1728                 ptr = (unsigned long)iref;
1729                 end = (unsigned long)ei + item_size;
1730                 if (ptr + size < end)
1731                         memmove_extent_buffer(leaf, ptr, ptr + size,
1732                                               end - ptr - size);
1733                 item_size -= size;
1734                 btrfs_truncate_item(trans, root, path, item_size, 1);
1735         }
1736         btrfs_mark_buffer_dirty(leaf);
1737 }
1738
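/*
 * add an inline back ref, bumping the count on an existing one when
 * possible.  Passes through -EAGAIN from the lookup when the ref no
 * longer fits inline and must become a separate item.
 */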
1739 static noinline_for_stack
1740 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1741                                  struct btrfs_root *root,
1742                                  struct btrfs_path *path,
1743                                  u64 bytenr, u64 num_bytes, u64 parent,
1744                                  u64 root_objectid, u64 owner,
1745                                  u64 offset, int refs_to_add,
1746                                  struct btrfs_delayed_extent_op *extent_op)
1747 {
1748         struct btrfs_extent_inline_ref *iref;
1749         int ret;
1750
1751         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1752                                            bytenr, num_bytes, parent,
1753                                            root_objectid, owner, offset, 1);
1754         if (ret == 0) {
1755                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1756                 update_inline_extent_backref(trans, root, path, iref,
1757                                              refs_to_add, extent_op);
1758         } else if (ret == -ENOENT) {
1759                 setup_inline_extent_backref(trans, root, path, iref, parent,
1760                                             root_objectid, owner, offset,
1761                                             refs_to_add, extent_op);
1762                 ret = 0;
1763         }
1764         return ret;
1765 }
1766
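/*
 * insert a standalone back ref item for refs that could not be kept
 * inline.  Tree block ref items carry no count of their own, so
 * refs_to_add must be 1 for them.
 */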
1767 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1768                                  struct btrfs_root *root,
1769                                  struct btrfs_path *path,
1770                                  u64 bytenr, u64 parent, u64 root_objectid,
1771                                  u64 owner, u64 offset, int refs_to_add)
1772 {
1773         int ret;
1774         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1775                 BUG_ON(refs_to_add != 1);
1776                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1777                                             parent, root_objectid);
1778         } else {
1779                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1780                                              parent, root_objectid,
1781                                              owner, offset, refs_to_add);
1782         }
1783         return ret;
1784 }
1785
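/*
 * drop refs_to_drop references from a back ref: shrink the inline ref
 * if iref is set, otherwise update or delete the standalone data ref
 * item, or delete the tree block ref item outright.
 */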
1786 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1787                                  struct btrfs_root *root,
1788                                  struct btrfs_path *path,
1789                                  struct btrfs_extent_inline_ref *iref,
1790                                  int refs_to_drop, int is_data)
1791 {
1792         int ret = 0;
1793
1794         BUG_ON(!is_data && refs_to_drop != 1);
1795         if (iref) {
1796                 update_inline_extent_backref(trans, root, path, iref,
1797                                              -refs_to_drop, NULL);
1798         } else if (is_data) {
1799                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1800         } else {
1801                 ret = btrfs_del_item(trans, root, path);
1802         }
1803         return ret;
1804 }
1805
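/* the >> 9 converts byte offsets to the 512-byte sectors blkdev expects */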
1806 static int btrfs_issue_discard(struct block_device *bdev,
1807                                 u64 start, u64 len)
1808 {
1809         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1810 }
1811
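/*
 * discard a byte range on every stripe that backs it.  Per-device
 * EOPNOTSUPP is ignored so one device that can't discard doesn't fail
 * the whole call; *actual_bytes reports what was really discarded.
 */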
1812 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1813                                 u64 num_bytes, u64 *actual_bytes)
1814 {
1815         int ret;
1816         u64 discarded_bytes = 0;
1817         struct btrfs_bio *bbio = NULL;
1818
1820         /* Tell the block device(s) that the sectors can be discarded */
1821         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1822                               bytenr, &num_bytes, &bbio, 0);
1823         /* Error condition is -ENOMEM */
1824         if (!ret) {
1825                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1826                 int i;
1827
1829                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1830                         if (!stripe->dev->can_discard)
1831                                 continue;
1832
1833                         ret = btrfs_issue_discard(stripe->dev->bdev,
1834                                                   stripe->physical,
1835                                                   stripe->length);
1836                         if (!ret)
1837                                 discarded_bytes += stripe->length;
1838                         else if (ret != -EOPNOTSUPP)
1839                                 break; /* Logic errors or -ENOMEM, or -EIO, though it is unclear how -EIO could happen here */
1840
1841                         /*
1842                          * If we get EOPNOTSUPP back for some reason,
1843                          * ignore the return value so we don't break
1844                          * callers of btrfs_discard_extent.
1845                          */
1846                         ret = 0;
1847                 }
1848                 kfree(bbio);
1849         }
1850
1851         if (actual_bytes)
1852                 *actual_bytes = discarded_bytes;
1853
1855         return ret;
1856 }
1857
1858 /* Can return -ENOMEM */
1859 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1860                          struct btrfs_root *root,
1861                          u64 bytenr, u64 num_bytes, u64 parent,
1862                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1863 {
1864         int ret;
1865         struct btrfs_fs_info *fs_info = root->fs_info;
1866
1867         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1868                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1869
1870         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1871                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1872                                         num_bytes,
1873                                         parent, root_objectid, (int)owner,
1874                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1875         } else {
1876                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1877                                         num_bytes,
1878                                         parent, root_objectid, owner, offset,
1879                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1880         }
1881         return ret;
1882 }
1883
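/*
 * slow path for adding a reference: try the inline back ref first; on
 * -EAGAIN the extent item's ref count is bumped here and the back ref
 * is then inserted as a standalone item.
 */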
1884 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1885                                   struct btrfs_root *root,
1886                                   u64 bytenr, u64 num_bytes,
1887                                   u64 parent, u64 root_objectid,
1888                                   u64 owner, u64 offset, int refs_to_add,
1889                                   struct btrfs_delayed_extent_op *extent_op)
1890 {
1891         struct btrfs_path *path;
1892         struct extent_buffer *leaf;
1893         struct btrfs_extent_item *item;
1894         u64 refs;
1895         int ret;
1896         int err = 0;
1897
1898         path = btrfs_alloc_path();
1899         if (!path)
1900                 return -ENOMEM;
1901
1902         path->reada = 1;
1903         path->leave_spinning = 1;
1904         /* this will set up the path even if it fails to insert the back ref */
1905         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1906                                            path, bytenr, num_bytes, parent,
1907                                            root_objectid, owner, offset,
1908                                            refs_to_add, extent_op);
1909         if (ret == 0)
1910                 goto out;
1911
1912         if (ret != -EAGAIN) {
1913                 err = ret;
1914                 goto out;
1915         }
1916
1917         leaf = path->nodes[0];
1918         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1919         refs = btrfs_extent_refs(leaf, item);
1920         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1921         if (extent_op)
1922                 __run_delayed_extent_op(extent_op, leaf, item);
1923
1924         btrfs_mark_buffer_dirty(leaf);
1925         btrfs_release_path(path);
1926
1927         path->reada = 1;
1928         path->leave_spinning = 1;
1929
1930         /* now insert the actual backref */
1931         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1932                                     path, bytenr, parent, root_objectid,
1933                                     owner, offset, refs_to_add);
1934         if (ret)
1935                 btrfs_abort_transaction(trans, root, ret);
1936 out:
1937         btrfs_free_path(path);
1938         return err;
1939 }
1940
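/*
 * apply a single delayed data ref: allocate the reserved extent for a
 * freshly inserted ADD ref, otherwise just bump or drop the ref count
 * on the existing extent item.
 */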
1941 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1942                                 struct btrfs_root *root,
1943                                 struct btrfs_delayed_ref_node *node,
1944                                 struct btrfs_delayed_extent_op *extent_op,
1945                                 int insert_reserved)
1946 {
1947         int ret = 0;
1948         struct btrfs_delayed_data_ref *ref;
1949         struct btrfs_key ins;
1950         u64 parent = 0;
1951         u64 ref_root = 0;
1952         u64 flags = 0;
1953
1954         ins.objectid = node->bytenr;
1955         ins.offset = node->num_bytes;
1956         ins.type = BTRFS_EXTENT_ITEM_KEY;
1957
1958         ref = btrfs_delayed_node_to_data_ref(node);
1959         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1960                 parent = ref->parent;
1961         else
1962                 ref_root = ref->root;
1963
1964         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1965                 if (extent_op) {
1966                         BUG_ON(extent_op->update_key);
1967                         flags |= extent_op->flags_to_set;
1968                 }
1969                 ret = alloc_reserved_file_extent(trans, root,
1970                                                  parent, ref_root, flags,
1971                                                  ref->objectid, ref->offset,
1972                                                  &ins, node->ref_mod);
1973         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1974                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1975                                              node->num_bytes, parent,
1976                                              ref_root, ref->objectid,
1977                                              ref->offset, node->ref_mod,
1978                                              extent_op);
1979         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1980                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1981                                           node->num_bytes, parent,
1982                                           ref_root, ref->objectid,
1983                                           ref->offset, node->ref_mod,
1984                                           extent_op);
1985         } else {
1986                 BUG();
1987         }
1988         return ret;
1989 }
1990
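/*
 * apply a delayed extent op to an extent item that is already mapped
 * into a leaf: update the flags and/or the tree block key in place.
 */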
1991 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1992                                     struct extent_buffer *leaf,
1993                                     struct btrfs_extent_item *ei)
1994 {
1995         u64 flags = btrfs_extent_flags(leaf, ei);
1996         if (extent_op->update_flags) {
1997                 flags |= extent_op->flags_to_set;
1998                 btrfs_set_extent_flags(leaf, ei, flags);
1999         }
2000
2001         if (extent_op->update_key) {
2002                 struct btrfs_tree_block_info *bi;
2003                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2004                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2005                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2006         }
2007 }
2008
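/*
 * look up the extent item a delayed extent op refers to and apply the
 * op.  A missing extent item here indicates corruption and is returned
 * as -EIO.
 */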
2009 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2010                                  struct btrfs_root *root,
2011                                  struct btrfs_delayed_ref_node *node,
2012                                  struct btrfs_delayed_extent_op *extent_op)
2013 {
2014         struct btrfs_key key;
2015         struct btrfs_path *path;
2016         struct btrfs_extent_item *ei;
2017         struct extent_buffer *leaf;
2018         u32 item_size;
2019         int ret;
2020         int err = 0;
2021
2022         if (trans->aborted)
2023                 return 0;
2024
2025         path = btrfs_alloc_path();
2026         if (!path)
2027                 return -ENOMEM;
2028
2029         key.objectid = node->bytenr;
2030         key.type = BTRFS_EXTENT_ITEM_KEY;
2031         key.offset = node->num_bytes;
2032
2033         path->reada = 1;
2034         path->leave_spinning = 1;
2035         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2036                                 path, 0, 1);
2037         if (ret < 0) {
2038                 err = ret;
2039                 goto out;
2040         }
2041         if (ret > 0) {
2042                 err = -EIO;
2043                 goto out;
2044         }
2045
2046         leaf = path->nodes[0];
2047         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2048 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2049         if (item_size < sizeof(*ei)) {
2050                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2051                                              path, (u64)-1, 0);
2052                 if (ret < 0) {
2053                         err = ret;
2054                         goto out;
2055                 }
2056                 leaf = path->nodes[0];
2057                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2058         }
2059 #endif
2060         BUG_ON(item_size < sizeof(*ei));
2061         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2062         __run_delayed_extent_op(extent_op, leaf, ei);
2063
2064         btrfs_mark_buffer_dirty(leaf);
2065 out:
2066         btrfs_free_path(path);
2067         return err;
2068 }
2069
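/*
 * tree block counterpart of run_delayed_data_ref.  Tree refs always
 * have ref_mod == 1, and a freshly allocated block carries its key
 * and flags in the extent op.
 */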
2070 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2071                                 struct btrfs_root *root,
2072                                 struct btrfs_delayed_ref_node *node,
2073                                 struct btrfs_delayed_extent_op *extent_op,
2074                                 int insert_reserved)
2075 {
2076         int ret = 0;
2077         struct btrfs_delayed_tree_ref *ref;
2078         struct btrfs_key ins;
2079         u64 parent = 0;
2080         u64 ref_root = 0;
2081
2082         ins.objectid = node->bytenr;
2083         ins.offset = node->num_bytes;
2084         ins.type = BTRFS_EXTENT_ITEM_KEY;
2085
2086         ref = btrfs_delayed_node_to_tree_ref(node);
2087         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2088                 parent = ref->parent;
2089         else
2090                 ref_root = ref->root;
2091
2092         BUG_ON(node->ref_mod != 1);
2093         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2094                 BUG_ON(!extent_op || !extent_op->update_flags ||
2095                        !extent_op->update_key);
2096                 ret = alloc_reserved_tree_block(trans, root,
2097                                                 parent, ref_root,
2098                                                 extent_op->flags_to_set,
2099                                                 &extent_op->key,
2100                                                 ref->level, &ins);
2101         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2102                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2103                                              node->num_bytes, parent, ref_root,
2104                                              ref->level, 0, 1, extent_op);
2105         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2106                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2107                                           node->num_bytes, parent, ref_root,
2108                                           ref->level, 0, 1, extent_op);
2109         } else {
2110                 BUG();
2111         }
2112         return ret;
2113 }
2114
2115 /* helper function to actually process a single delayed ref entry */
2116 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2117                                struct btrfs_root *root,
2118                                struct btrfs_delayed_ref_node *node,
2119                                struct btrfs_delayed_extent_op *extent_op,
2120                                int insert_reserved)
2121 {
2122         int ret = 0;
2123
2124         if (trans->aborted)
2125                 return 0;
2126
2127         if (btrfs_delayed_ref_is_head(node)) {
2128                 struct btrfs_delayed_ref_head *head;
2129                 /*
2130                  * we've hit the end of the chain and we were supposed
2131                  * to insert this extent into the tree.  But it got
2132                  * deleted before we ever needed to insert it, so all
2133                  * we have to do is clean up the accounting.
2134                  */
2135                 BUG_ON(extent_op);
2136                 head = btrfs_delayed_node_to_head(node);
2137                 if (insert_reserved) {
2138                         btrfs_pin_extent(root, node->bytenr,
2139                                          node->num_bytes, 1);
2140                         if (head->is_data) {
2141                                 ret = btrfs_del_csums(trans, root,
2142                                                       node->bytenr,
2143                                                       node->num_bytes);
2144                         }
2145                 }
2146                 return ret;
2147         }
2148
2149         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2150             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2151                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2152                                            insert_reserved);
2153         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2154                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2155                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2156                                            insert_reserved);
2157         else
2158                 BUG();
2159         return ret;
2160 }
2161
2162 static noinline struct btrfs_delayed_ref_node *
2163 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2164 {
2165         struct rb_node *node;
2166         struct btrfs_delayed_ref_node *ref;
2167         int action = BTRFS_ADD_DELAYED_REF;
2168 again:
2169         /*
2170          * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2171          * this prevents the ref count from going down to zero while
2172          * there are still pending delayed refs.
2173          */
2174         node = rb_prev(&head->node.rb_node);
2175         while (1) {
2176                 if (!node)
2177                         break;
2178                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2179                                 rb_node);
2180                 if (ref->bytenr != head->node.bytenr)
2181                         break;
2182                 if (ref->action == action)
2183                         return ref;
2184                 node = rb_prev(node);
2185         }
2186         if (action == BTRFS_ADD_DELAYED_REF) {
2187                 action = BTRFS_DROP_DELAYED_REF;
2188                 goto again;
2189         }
2190         return NULL;
2191 }
2192
2193 /*
2194  * Returns the number of refs processed, even on an aborted transaction.
2195  * Returns -ENOMEM or -EIO on failure; the caller aborts the transaction.
2196  */
2197 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2198                                        struct btrfs_root *root,
2199                                        struct list_head *cluster)
2200 {
2201         struct btrfs_delayed_ref_root *delayed_refs;
2202         struct btrfs_delayed_ref_node *ref;
2203         struct btrfs_delayed_ref_head *locked_ref = NULL;
2204         struct btrfs_delayed_extent_op *extent_op;
2205         struct btrfs_fs_info *fs_info = root->fs_info;
2206         int ret;
2207         int count = 0;
2208         int must_insert_reserved = 0;
2209
2210         delayed_refs = &trans->transaction->delayed_refs;
2211         while (1) {
2212                 if (!locked_ref) {
2213                         /* pick a new head ref from the cluster list */
2214                         if (list_empty(cluster))
2215                                 break;
2216
2217                         locked_ref = list_entry(cluster->next,
2218                                      struct btrfs_delayed_ref_head, cluster);
2219
2220                         /* grab the lock that says we are going to process
2221                          * all the refs for this head */
2222                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2223
2224                         /*
2225                          * we may have dropped the spin lock to get the head
2226                          * mutex lock, and that might have given someone else
2227                          * time to free the head.  If that's true, it has been
2228                          * removed from our list and we can move on.
2229                          */
2230                         if (ret == -EAGAIN) {
2231                                 locked_ref = NULL;
2232                                 count++;
2233                                 continue;
2234                         }
2235                 }
2236
2237                 /*
2238                  * We need to try and merge add/drops of the same ref since we
2239                  * can run into issues with relocate dropping the implicit ref
2240                  * and then it being added back again before the drop can
2241                  * finish.  If we merged anything we need to re-loop so we can
2242                  * get a good ref.
2243                  */
2244                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2245                                          locked_ref);
2246
2247                 /*
2248                  * locked_ref is the head node, so we have to go one
2249                  * node back for any delayed ref updates
2250                  */
2251                 ref = select_delayed_ref(locked_ref);
2252
2253                 if (ref && ref->seq &&
2254                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2255                         /*
2256                          * there are still refs with lower seq numbers in the
2257                          * process of being added. Don't run this ref yet.
2258                          */
2259                         list_del_init(&locked_ref->cluster);
2260                         btrfs_delayed_ref_unlock(locked_ref);
2261                         locked_ref = NULL;
2262                         delayed_refs->num_heads_ready++;
2263                         spin_unlock(&delayed_refs->lock);
2264                         cond_resched();
2265                         spin_lock(&delayed_refs->lock);
2266                         continue;
2267                 }
2268
2269                 /*
2270                  * record the must insert reserved flag before we
2271                  * drop the spin lock.
2272                  */
2273                 must_insert_reserved = locked_ref->must_insert_reserved;
2274                 locked_ref->must_insert_reserved = 0;
2275
2276                 extent_op = locked_ref->extent_op;
2277                 locked_ref->extent_op = NULL;
2278
2279                 if (!ref) {
2280                         /* All delayed refs have been processed; go ahead
2281                          * and send the head node to run_one_delayed_ref,
2282                          * so that any accounting fixes can happen.
2283                          */
2284                         ref = &locked_ref->node;
2285
2286                         if (extent_op && must_insert_reserved) {
2287                                 btrfs_free_delayed_extent_op(extent_op);
2288                                 extent_op = NULL;
2289                         }
2290
2291                         if (extent_op) {
2292                                 spin_unlock(&delayed_refs->lock);
2293
2294                                 ret = run_delayed_extent_op(trans, root,
2295                                                             ref, extent_op);
2296                                 btrfs_free_delayed_extent_op(extent_op);
2297
2298                                 if (ret) {
2299                                         printk(KERN_DEBUG
2300                                                "btrfs: run_delayed_extent_op "
2301                                                "returned %d\n", ret);
2302                                         spin_lock(&delayed_refs->lock);
2303                                         btrfs_delayed_ref_unlock(locked_ref);
2304                                         return ret;
2305                                 }
2306
2307                                 goto next;
2308                         }
2309                 }
2310
2311                 ref->in_tree = 0;
2312                 rb_erase(&ref->rb_node, &delayed_refs->root);
2313                 delayed_refs->num_entries--;
2314                 if (!btrfs_delayed_ref_is_head(ref)) {
2315                         /*
2316                          * when we play the delayed ref, also correct the
2317                          * ref_mod on head
2318                          */
2319                         switch (ref->action) {
2320                         case BTRFS_ADD_DELAYED_REF:
2321                         case BTRFS_ADD_DELAYED_EXTENT:
2322                                 locked_ref->node.ref_mod -= ref->ref_mod;
2323                                 break;
2324                         case BTRFS_DROP_DELAYED_REF:
2325                                 locked_ref->node.ref_mod += ref->ref_mod;
2326                                 break;
2327                         default:
2328                                 WARN_ON(1);
2329                         }
2330                 }
2331                 spin_unlock(&delayed_refs->lock);
2332
2333                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2334                                           must_insert_reserved);
2335
2336                 btrfs_free_delayed_extent_op(extent_op);
2337                 if (ret) {
2338                         btrfs_delayed_ref_unlock(locked_ref);
2339                         btrfs_put_delayed_ref(ref);
2340                         printk(KERN_DEBUG
2341                                "btrfs: run_one_delayed_ref returned %d\n", ret);
2342                         spin_lock(&delayed_refs->lock);
2343                         return ret;
2344                 }
2345
2346                 /*
2347                  * If this node is a head, that means all the refs in this head
2348                  * have been dealt with, and we will pick the next head to deal
2349                  * with, so we must unlock the head and drop it from the cluster
2350                  * list before we release it.
2351                  */
2352                 if (btrfs_delayed_ref_is_head(ref)) {
2353                         list_del_init(&locked_ref->cluster);
2354                         btrfs_delayed_ref_unlock(locked_ref);
2355                         locked_ref = NULL;
2356                 }
2357                 btrfs_put_delayed_ref(ref);
2358                 count++;
2359 next:
2360                 cond_resched();
2361                 spin_lock(&delayed_refs->lock);
2362         }
2363         return count;
2364 }
2365
2366 #ifdef SCRAMBLE_DELAYED_REFS
2367 /*
2368  * Normally delayed refs get processed in ascending bytenr order. This
2369  * correlates in most cases to the order added. To expose dependencies on this
2370  * order, we start to process the tree in the middle instead of the beginning
2371  */
2372 static u64 find_middle(struct rb_root *root)
2373 {
2374         struct rb_node *n = root->rb_node;
2375         struct btrfs_delayed_ref_node *entry;
2376         int alt = 1;
2377         u64 middle;
2378         u64 first = 0, last = 0;
2379
2380         n = rb_first(root);
2381         if (n) {
2382                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2383                 first = entry->bytenr;
2384         }
2385         n = rb_last(root);
2386         if (n) {
2387                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2388                 last = entry->bytenr;
2389         }
2390         n = root->rb_node;
2391
2392         while (n) {
2393                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2394                 WARN_ON(!entry->in_tree);
2395
2396                 middle = entry->bytenr;
2397
2398                 if (alt)
2399                         n = n->rb_left;
2400                 else
2401                         n = n->rb_right;
2402
2403                 alt = 1 - alt;
2404         }
2405         return middle;
2406 }
2407 #endif
2408
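/*
 * account all qgroup ref updates queued on this transaction and drop
 * the tree mod log sequence element that was holding them back.
 */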
2409 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2410                                          struct btrfs_fs_info *fs_info)
2411 {
2412         struct qgroup_update *qgroup_update;
2413         int ret = 0;
2414
2415         if (list_empty(&trans->qgroup_ref_list) !=
2416             !trans->delayed_ref_elem.seq) {
2417                 /* list without seq or seq without list */
2418                 printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
2419                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2420                         trans->delayed_ref_elem.seq);
2421                 BUG();
2422         }
2423
2424         if (!trans->delayed_ref_elem.seq)
2425                 return 0;
2426
2427         while (!list_empty(&trans->qgroup_ref_list)) {
2428                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2429                                                  struct qgroup_update, list);
2430                 list_del(&qgroup_update->list);
2431                 if (!ret)
2432                         ret = btrfs_qgroup_account_ref(
2433                                         trans, fs_info, qgroup_update->node,
2434                                         qgroup_update->extent_op);
2435                 kfree(qgroup_update);
2436         }
2437
2438         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2439
2440         return ret;
2441 }
2442
2443 /*
2444  * this starts processing the delayed reference count updates and
2445  * extent insertions we have queued up so far.  count can be
2446  * 0 to process everything queued at the start of the run (but not
2447  * newly added entries), a target number of entries, or (unsigned
2448  * long)-1 to loop until every queued ref, including new ones, has run.
2449  *
2450  * Returns 0 on success or if called with an aborted transaction
2451  * Returns <0 on error and aborts the transaction
2452  */
2453 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2454                            struct btrfs_root *root, unsigned long count)
2455 {
2456         struct rb_node *node;
2457         struct btrfs_delayed_ref_root *delayed_refs;
2458         struct btrfs_delayed_ref_node *ref;
2459         struct list_head cluster;
2460         int ret;
2461         u64 delayed_start;
2462         int run_all = count == (unsigned long)-1;
2463         int run_most = 0;
2464         int loops;
2465
2466         /* We'll clean this up in btrfs_cleanup_transaction */
2467         if (trans->aborted)
2468                 return 0;
2469
2470         if (root == root->fs_info->extent_root)
2471                 root = root->fs_info->tree_root;
2472
2473         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2474
2475         delayed_refs = &trans->transaction->delayed_refs;
2476         INIT_LIST_HEAD(&cluster);
2477 again:
2478         loops = 0;
2479         spin_lock(&delayed_refs->lock);
2480
2481 #ifdef SCRAMBLE_DELAYED_REFS
2482         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2483 #endif
2484
2485         if (count == 0) {
2486                 count = delayed_refs->num_entries * 2;
2487                 run_most = 1;
2488         }
2489         while (1) {
2490                 if (!(run_all || run_most) &&
2491                     delayed_refs->num_heads_ready < 64)
2492                         break;
2493
2494                 /*
2495                  * go find something we can process in the rbtree.  We start at
2496                  * the beginning of the tree, and then build a cluster
2497                  * of refs to process starting at the first one we are able to
2498                  * lock
2499                  */
2500                 delayed_start = delayed_refs->run_delayed_start;
2501                 ret = btrfs_find_ref_cluster(trans, &cluster,
2502                                              delayed_refs->run_delayed_start);
2503                 if (ret)
2504                         break;
2505
2506                 ret = run_clustered_refs(trans, root, &cluster);
2507                 if (ret < 0) {
2508                         btrfs_release_ref_cluster(&cluster);
2509                         spin_unlock(&delayed_refs->lock);
2510                         btrfs_abort_transaction(trans, root, ret);
2511                         return ret;
2512                 }
2513
2514                 count -= min_t(unsigned long, ret, count);
2515
2516                 if (count == 0)
2517                         break;
2518
2519                 if (delayed_start >= delayed_refs->run_delayed_start) {
2520                         if (loops == 0) {
2521                                 /*
2522                                  * btrfs_find_ref_cluster wrapped around.  Do
2523                                  * one more cycle; if no delayed ref runs during
2524                                  * that cycle (because all of them are blocked),
2525                                  * bail out.
2526                                  */
2527                                 loops = 1;
2528                         } else {
2529                                 /*
2530                                  * no runnable refs left, stop trying
2531                                  */
2532                                 BUG_ON(run_all);
2533                                 break;
2534                         }
2535                 }
2536                 if (ret) {
2537                         /* refs were run, let's reset staleness detection */
2538                         loops = 0;
2539                 }
2540         }
2541
2542         if (run_all) {
2543                 if (!list_empty(&trans->new_bgs)) {
2544                         spin_unlock(&delayed_refs->lock);
2545                         btrfs_create_pending_block_groups(trans, root);
2546                         spin_lock(&delayed_refs->lock);
2547                 }
2548
2549                 node = rb_first(&delayed_refs->root);
2550                 if (!node)
2551                         goto out;
2552                 count = (unsigned long)-1;
2553
2554                 while (node) {
2555                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2556                                        rb_node);
2557                         if (btrfs_delayed_ref_is_head(ref)) {
2558                                 struct btrfs_delayed_ref_head *head;
2559
2560                                 head = btrfs_delayed_node_to_head(ref);
2561                                 atomic_inc(&ref->refs);
2562
2563                                 spin_unlock(&delayed_refs->lock);
2564                                 /*
2565                                  * Mutex was contended, block until it's
2566                                  * released and try again
2567                                  */
2568                                 mutex_lock(&head->mutex);
2569                                 mutex_unlock(&head->mutex);
2570
2571                                 btrfs_put_delayed_ref(ref);
2572                                 cond_resched();
2573                                 goto again;
2574                         }
2575                         node = rb_next(node);
2576                 }
2577                 spin_unlock(&delayed_refs->lock);
2578                 schedule_timeout(1);
2579                 goto again;
2580         }
2581 out:
2582         spin_unlock(&delayed_refs->lock);
2583         assert_qgroups_uptodate(trans);
2584         return 0;
2585 }
2586
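/*
 * queue a delayed extent op that ORs @flags into the on-disk extent
 * item flags; the update itself happens when delayed refs are run.
 */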
2587 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2588                                 struct btrfs_root *root,
2589                                 u64 bytenr, u64 num_bytes, u64 flags,
2590                                 int is_data)
2591 {
2592         struct btrfs_delayed_extent_op *extent_op;
2593         int ret;
2594
2595         extent_op = btrfs_alloc_delayed_extent_op();
2596         if (!extent_op)
2597                 return -ENOMEM;
2598
2599         extent_op->flags_to_set = flags;
2600         extent_op->update_flags = 1;
2601         extent_op->update_key = 0;
2602         extent_op->is_data = is_data ? 1 : 0;
2603
2604         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2605                                           num_bytes, extent_op);
2606         if (ret)
2607                 btrfs_free_delayed_extent_op(extent_op);
2608         return ret;
2609 }
2610
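/*
 * check the delayed ref rbtree for cross references on a data extent.
 * Returns 0 when the only pending ref is our own (root, objectid,
 * offset) data ref, 1 when some other ref exists, -ENOENT when there
 * are no pending refs, and -EAGAIN when the head mutex was contended
 * and the caller should retry.
 */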
2611 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2612                                       struct btrfs_root *root,
2613                                       struct btrfs_path *path,
2614                                       u64 objectid, u64 offset, u64 bytenr)
2615 {
2616         struct btrfs_delayed_ref_head *head;
2617         struct btrfs_delayed_ref_node *ref;
2618         struct btrfs_delayed_data_ref *data_ref;
2619         struct btrfs_delayed_ref_root *delayed_refs;
2620         struct rb_node *node;
2621         int ret = 0;
2622
2623         ret = -ENOENT;
2624         delayed_refs = &trans->transaction->delayed_refs;
2625         spin_lock(&delayed_refs->lock);
2626         head = btrfs_find_delayed_ref_head(trans, bytenr);
2627         if (!head)
2628                 goto out;
2629
2630         if (!mutex_trylock(&head->mutex)) {
2631                 atomic_inc(&head->node.refs);
2632                 spin_unlock(&delayed_refs->lock);
2633
2634                 btrfs_release_path(path);
2635
2636                 /*
2637                  * Mutex was contended, block until it's released and let
2638                  * caller try again
2639                  */
2640                 mutex_lock(&head->mutex);
2641                 mutex_unlock(&head->mutex);
2642                 btrfs_put_delayed_ref(&head->node);
2643                 return -EAGAIN;
2644         }
2645
2646         node = rb_prev(&head->node.rb_node);
2647         if (!node)
2648                 goto out_unlock;
2649
2650         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2651
2652         if (ref->bytenr != bytenr)
2653                 goto out_unlock;
2654
2655         ret = 1;
2656         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2657                 goto out_unlock;
2658
2659         data_ref = btrfs_delayed_node_to_data_ref(ref);
2660
2661         node = rb_prev(node);
2662         if (node) {
2663                 int seq = ref->seq;
2664
2665                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2666                 if (ref->bytenr == bytenr && ref->seq == seq)
2667                         goto out_unlock;
2668         }
2669
2670         if (data_ref->root != root->root_key.objectid ||
2671             data_ref->objectid != objectid || data_ref->offset != offset)
2672                 goto out_unlock;
2673
2674         ret = 0;
2675 out_unlock:
2676         mutex_unlock(&head->mutex);
2677 out:
2678         spin_unlock(&delayed_refs->lock);
2679         return ret;
2680 }
2681
2682 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2683                                         struct btrfs_root *root,
2684                                         struct btrfs_path *path,
2685                                         u64 objectid, u64 offset, u64 bytenr)
2686 {
2687         struct btrfs_root *extent_root = root->fs_info->extent_root;
2688         struct extent_buffer *leaf;
2689         struct btrfs_extent_data_ref *ref;
2690         struct btrfs_extent_inline_ref *iref;
2691         struct btrfs_extent_item *ei;
2692         struct btrfs_key key;
2693         u32 item_size;
2694         int ret;
2695
2696         key.objectid = bytenr;
2697         key.offset = (u64)-1;
2698         key.type = BTRFS_EXTENT_ITEM_KEY;
2699
2700         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2701         if (ret < 0)
2702                 goto out;
2703         BUG_ON(ret == 0); /* Corruption */
2704
2705         ret = -ENOENT;
2706         if (path->slots[0] == 0)
2707                 goto out;
2708
2709         path->slots[0]--;
2710         leaf = path->nodes[0];
2711         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2712
2713         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2714                 goto out;
2715
2716         ret = 1;
2717         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2718 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2719         if (item_size < sizeof(*ei)) {
2720                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2721                 goto out;
2722         }
2723 #endif
2724         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2725
2726         if (item_size != sizeof(*ei) +
2727             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2728                 goto out;
2729
2730         if (btrfs_extent_generation(leaf, ei) <=
2731             btrfs_root_last_snapshot(&root->root_item))
2732                 goto out;
2733
2734         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2735         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2736             BTRFS_EXTENT_DATA_REF_KEY)
2737                 goto out;
2738
2739         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2740         if (btrfs_extent_refs(leaf, ei) !=
2741             btrfs_extent_data_ref_count(leaf, ref) ||
2742             btrfs_extent_data_ref_root(leaf, ref) !=
2743             root->root_key.objectid ||
2744             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2745             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2746                 goto out;
2747
2748         ret = 0;
2749 out:
2750         return ret;
2751 }
2752
2753 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2754                           struct btrfs_root *root,
2755                           u64 objectid, u64 offset, u64 bytenr)
2756 {
2757         struct btrfs_path *path;
2758         int ret;
2759         int ret2;
2760
2761         path = btrfs_alloc_path();
2762         if (!path)
2763                 return -ENOMEM;
2764
2765         do {
2766                 ret = check_committed_ref(trans, root, path, objectid,
2767                                           offset, bytenr);
2768                 if (ret && ret != -ENOENT)
2769                         goto out;
2770
2771                 ret2 = check_delayed_ref(trans, root, path, objectid,
2772                                          offset, bytenr);
2773         } while (ret2 == -EAGAIN);
2774
2775         if (ret2 && ret2 != -ENOENT) {
2776                 ret = ret2;
2777                 goto out;
2778         }
2779
2780         if (ret != -ENOENT || ret2 != -ENOENT)
2781                 ret = 0;
2782 out:
2783         btrfs_free_path(path);
2784         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2785                 WARN_ON(ret > 0);
2786         return ret;
2787 }
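/*
 * Editorial sketch, not part of the original file: the nodatacow write
 * path uses a check like this to decide whether a file extent may safely
 * be overwritten in place.  Any nonzero return, errors included, is
 * treated conservatively as "a cross reference may exist, must COW".
 */
static int example_can_nocow(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 ino,
                             u64 file_offset, u64 disk_bytenr)
{
        /* true only when this root holds the sole reference */
        return btrfs_cross_ref_exist(trans, root, ino,
                                     file_offset, disk_bytenr) == 0;
}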
2788
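/*
 * Editorial note: __btrfs_mod_ref() walks every pointer in @buf and adds
 * or drops one reference per pointer.  For a leaf (level 0) that means the
 * disk extents behind regular file extent items; for a node it means the
 * child tree blocks.  @full_backref selects whether the refs are keyed on
 * the parent block (parent = buf->start) or on the owning root (parent = 0).
 */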
2789 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2790                            struct btrfs_root *root,
2791                            struct extent_buffer *buf,
2792                            int full_backref, int inc, int for_cow)
2793 {
2794         u64 bytenr;
2795         u64 num_bytes;
2796         u64 parent;
2797         u64 ref_root;
2798         u32 nritems;
2799         struct btrfs_key key;
2800         struct btrfs_file_extent_item *fi;
2801         int i;
2802         int level;
2803         int ret = 0;
2804         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2805                             u64, u64, u64, u64, u64, u64, int);
2806
2807         ref_root = btrfs_header_owner(buf);
2808         nritems = btrfs_header_nritems(buf);
2809         level = btrfs_header_level(buf);
2810
2811         if (!root->ref_cows && level == 0)
2812                 return 0;
2813
2814         if (inc)
2815                 process_func = btrfs_inc_extent_ref;
2816         else
2817                 process_func = btrfs_free_extent;
2818
2819         if (full_backref)
2820                 parent = buf->start;
2821         else
2822                 parent = 0;
2823
2824         for (i = 0; i < nritems; i++) {
2825                 if (level == 0) {
2826                         btrfs_item_key_to_cpu(buf, &key, i);
2827                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2828                                 continue;
2829                         fi = btrfs_item_ptr(buf, i,
2830                                             struct btrfs_file_extent_item);
2831                         if (btrfs_file_extent_type(buf, fi) ==
2832                             BTRFS_FILE_EXTENT_INLINE)
2833                                 continue;
2834                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2835                         if (bytenr == 0)
2836                                 continue;
2837
2838                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2839                         key.offset -= btrfs_file_extent_offset(buf, fi);
2840                         ret = process_func(trans, root, bytenr, num_bytes,
2841                                            parent, ref_root, key.objectid,
2842                                            key.offset, for_cow);
2843                         if (ret)
2844                                 goto fail;
2845                 } else {
2846                         bytenr = btrfs_node_blockptr(buf, i);
2847                         num_bytes = btrfs_level_size(root, level - 1);
2848                         ret = process_func(trans, root, bytenr, num_bytes,
2849                                            parent, ref_root, level - 1, 0,
2850                                            for_cow);
2851                         if (ret)
2852                                 goto fail;
2853                 }
2854         }
2855         return 0;
2856 fail:
2857         return ret;
2858 }
2859
2860 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2861                   struct extent_buffer *buf, int full_backref, int for_cow)
2862 {
2863         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
2864 }
2865
2866 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2867                   struct extent_buffer *buf, int full_backref, int for_cow)
2868 {
2869         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
2870 }
2871
2872 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2873                                  struct btrfs_root *root,
2874                                  struct btrfs_path *path,
2875                                  struct btrfs_block_group_cache *cache)
2876 {
2877         int ret;
2878         struct btrfs_root *extent_root = root->fs_info->extent_root;
2879         unsigned long bi;
2880         struct extent_buffer *leaf;
2881
2882         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2883         if (ret < 0)
2884                 goto fail;
2885         BUG_ON(ret); /* Corruption */
2886
2887         leaf = path->nodes[0];
2888         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2889         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2890         btrfs_mark_buffer_dirty(leaf);
2891         btrfs_release_path(path);
2892 fail:
2893         if (ret) {
2894                 btrfs_abort_transaction(trans, root, ret);
2895                 return ret;
2896         }
2897         return 0;
2898
2899 }
2900
2901 static struct btrfs_block_group_cache *
2902 next_block_group(struct btrfs_root *root,
2903                  struct btrfs_block_group_cache *cache)
2904 {
2905         struct rb_node *node;
2906         spin_lock(&root->fs_info->block_group_cache_lock);
2907         node = rb_next(&cache->cache_node);
2908         btrfs_put_block_group(cache);
2909         if (node) {
2910                 cache = rb_entry(node, struct btrfs_block_group_cache,
2911                                  cache_node);
2912                 btrfs_get_block_group(cache);
2913         } else
2914                 cache = NULL;
2915         spin_unlock(&root->fs_info->block_group_cache_lock);
2916         return cache;
2917 }
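/*
 * Editorial sketch, not part of the original file: next_block_group()
 * drops the caller's reference on @cache and takes one on the successor
 * (or returns NULL at the end), so a full scan is a simple handoff loop:
 */
static void example_scan_block_groups(struct btrfs_root *root)
{
        struct btrfs_block_group_cache *cache;

        cache = btrfs_lookup_first_block_group(root->fs_info, 0);
        while (cache) {
                /* inspect [cache->key.objectid, +cache->key.offset) here */
                cache = next_block_group(root, cache);
        }
}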
2918
2919 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2920                             struct btrfs_trans_handle *trans,
2921                             struct btrfs_path *path)
2922 {
2923         struct btrfs_root *root = block_group->fs_info->tree_root;
2924         struct inode *inode = NULL;
2925         u64 alloc_hint = 0;
2926         int dcs = BTRFS_DC_ERROR;
2927         int num_pages = 0;
2928         int retries = 0;
2929         int ret = 0;
2930
2931         /*
2932          * If this block group is smaller than 100 megs, don't bother caching the
2933          * block group.
2934          */
2935         if (block_group->key.offset < (100 * 1024 * 1024)) {
2936                 spin_lock(&block_group->lock);
2937                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2938                 spin_unlock(&block_group->lock);
2939                 return 0;
2940         }
2941
2942 again:
2943         inode = lookup_free_space_inode(root, block_group, path);
2944         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2945                 ret = PTR_ERR(inode);
2946                 btrfs_release_path(path);
2947                 goto out;
2948         }
2949
2950         if (IS_ERR(inode)) {
2951                 BUG_ON(retries);
2952                 retries++;
2953
2954                 if (block_group->ro)
2955                         goto out_free;
2956
2957                 ret = create_free_space_inode(root, trans, block_group, path);
2958                 if (ret)
2959                         goto out_free;
2960                 goto again;
2961         }
2962
2963         /* We've already set up this transaction, go ahead and exit */
2964         if (block_group->cache_generation == trans->transid &&
2965             i_size_read(inode)) {
2966                 dcs = BTRFS_DC_SETUP;
2967                 goto out_put;
2968         }
2969
2970         /*
2971          * We want to set the generation to 0, that way if anything goes wrong
2972          * from here on out we know not to trust this cache when we load up next
2973          * time.
2974          */
2975         BTRFS_I(inode)->generation = 0;
2976         ret = btrfs_update_inode(trans, root, inode);
2977         WARN_ON(ret);
2978
2979         if (i_size_read(inode) > 0) {
2980                 ret = btrfs_truncate_free_space_cache(root, trans, path,
2981                                                       inode);
2982                 if (ret)
2983                         goto out_put;
2984         }
2985
2986         spin_lock(&block_group->lock);
2987         if (block_group->cached != BTRFS_CACHE_FINISHED ||
2988             !btrfs_test_opt(root, SPACE_CACHE)) {
2989                 /*
2990                  * don't bother trying to write stuff out _if_
2991                  * a) we're not cached,
2992                  * b) we're mounted with the nospace_cache option.
2993                  */
2994                 dcs = BTRFS_DC_WRITTEN;
2995                 spin_unlock(&block_group->lock);
2996                 goto out_put;
2997         }
2998         spin_unlock(&block_group->lock);
2999
3000         /*
3001          * Try to preallocate enough space based on how big the block group is.
3002          * Keep in mind this has to include any pinned space which could end up
3003          * taking up quite a bit since it's not folded into the other space
3004          * cache.
3005          */
3006         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3007         if (!num_pages)
3008                 num_pages = 1;
3009
3010         num_pages *= 16;
3011         num_pages *= PAGE_CACHE_SIZE;
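        /*
         * Editorial example: a 1GiB block group gives
         * div64_u64(1GiB, 256MiB) = 4, so 4 * 16 = 64 pages, i.e. 256KiB
         * of preallocated cache space with 4KiB pages.
         */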
3012
3013         ret = btrfs_check_data_free_space(inode, num_pages);
3014         if (ret)
3015                 goto out_put;
3016
3017         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3018                                               num_pages, num_pages,
3019                                               &alloc_hint);
3020         if (!ret)
3021                 dcs = BTRFS_DC_SETUP;
3022         btrfs_free_reserved_data_space(inode, num_pages);
3023
3024 out_put:
3025         iput(inode);
3026 out_free:
3027         btrfs_release_path(path);
3028 out:
3029         spin_lock(&block_group->lock);
3030         if (!ret && dcs == BTRFS_DC_SETUP)
3031                 block_group->cache_generation = trans->transid;
3032         block_group->disk_cache_state = dcs;
3033         spin_unlock(&block_group->lock);
3034
3035         return ret;
3036 }
3037
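/*
 * Editorial summary of the three passes below: first set up free space
 * cache inodes for every block group still in BTRFS_DC_CLEAR, then write
 * out the dirty block group items themselves, and finally write the space
 * cache contents for groups left in BTRFS_DC_NEED_WRITE.  A pass restarts
 * from the top ("goto again") whenever an earlier state reappears.
 */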
3038 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3039                                    struct btrfs_root *root)
3040 {
3041         struct btrfs_block_group_cache *cache;
3042         int err = 0;
3043         struct btrfs_path *path;
3044         u64 last = 0;
3045
3046         path = btrfs_alloc_path();
3047         if (!path)
3048                 return -ENOMEM;
3049
3050 again:
3051         while (1) {
3052                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3053                 while (cache) {
3054                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3055                                 break;
3056                         cache = next_block_group(root, cache);
3057                 }
3058                 if (!cache) {
3059                         if (last == 0)
3060                                 break;
3061                         last = 0;
3062                         continue;
3063                 }
3064                 err = cache_save_setup(cache, trans, path);
3065                 last = cache->key.objectid + cache->key.offset;
3066                 btrfs_put_block_group(cache);
3067         }
3068
3069         while (1) {
3070                 if (last == 0) {
3071                         err = btrfs_run_delayed_refs(trans, root,
3072                                                      (unsigned long)-1);
3073                         if (err) /* File system offline */
3074                                 goto out;
3075                 }
3076
3077                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3078                 while (cache) {
3079                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3080                                 btrfs_put_block_group(cache);
3081                                 goto again;
3082                         }
3083
3084                         if (cache->dirty)
3085                                 break;
3086                         cache = next_block_group(root, cache);
3087                 }
3088                 if (!cache) {
3089                         if (last == 0)
3090                                 break;
3091                         last = 0;
3092                         continue;
3093                 }
3094
3095                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3096                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3097                 cache->dirty = 0;
3098                 last = cache->key.objectid + cache->key.offset;
3099
3100                 err = write_one_cache_group(trans, root, path, cache);
3101                 if (err) /* File system offline */
3102                         goto out;
3103
3104                 btrfs_put_block_group(cache);
3105         }
3106
3107         while (1) {
3108                 /*
3109                  * I don't think this is needed since we're just marking our
3110                  * preallocated extent as written, but just in case, it
3111                  * can't hurt.
3112                  */
3113                 if (last == 0) {
3114                         err = btrfs_run_delayed_refs(trans, root,
3115                                                      (unsigned long)-1);
3116                         if (err) /* File system offline */
3117                                 goto out;
3118                 }
3119
3120                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3121                 while (cache) {
3122                         /*
3123                          * Really this shouldn't happen, but it could if we
3124                          * couldn't write the entire preallocated extent and
3125                          * splitting the extent resulted in a new block.
3126                          */
3127                         if (cache->dirty) {
3128                                 btrfs_put_block_group(cache);
3129                                 goto again;
3130                         }
3131                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3132                                 break;
3133                         cache = next_block_group(root, cache);
3134                 }
3135                 if (!cache) {
3136                         if (last == 0)
3137                                 break;
3138                         last = 0;
3139                         continue;
3140                 }
3141
3142                 err = btrfs_write_out_cache(root, trans, cache, path);
3143
3144                 /*
3145                  * If we didn't have an error then the cache state is still
3146                  * NEED_WRITE, so we can set it to WRITTEN.
3147                  */
3148                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3149                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3150                 last = cache->key.objectid + cache->key.offset;
3151                 btrfs_put_block_group(cache);
3152         }
3153 out:
3154
3155         btrfs_free_path(path);
3156         return err;
3157 }
3158
3159 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3160 {
3161         struct btrfs_block_group_cache *block_group;
3162         int readonly = 0;
3163
3164         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3165         if (!block_group || block_group->ro)
3166                 readonly = 1;
3167         if (block_group)
3168                 btrfs_put_block_group(block_group);
3169         return readonly;
3170 }
3171
3172 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3173                              u64 total_bytes, u64 bytes_used,
3174                              struct btrfs_space_info **space_info)
3175 {
3176         struct btrfs_space_info *found;
3177         int i;
3178         int factor;
3179
3180         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3181                      BTRFS_BLOCK_GROUP_RAID10))
3182                 factor = 2;
3183         else
3184                 factor = 1;
3185
3186         found = __find_space_info(info, flags);
3187         if (found) {
3188                 spin_lock(&found->lock);
3189                 found->total_bytes += total_bytes;
3190                 found->disk_total += total_bytes * factor;
3191                 found->bytes_used += bytes_used;
3192                 found->disk_used += bytes_used * factor;
3193                 found->full = 0;
3194                 spin_unlock(&found->lock);
3195                 *space_info = found;
3196                 return 0;
3197         }
3198         found = kzalloc(sizeof(*found), GFP_NOFS);
3199         if (!found)
3200                 return -ENOMEM;
3201
3202         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3203                 INIT_LIST_HEAD(&found->block_groups[i]);
3204         init_rwsem(&found->groups_sem);
3205         spin_lock_init(&found->lock);
3206         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3207         found->total_bytes = total_bytes;
3208         found->disk_total = total_bytes * factor;
3209         found->bytes_used = bytes_used;
3210         found->disk_used = bytes_used * factor;
3211         found->bytes_pinned = 0;
3212         found->bytes_reserved = 0;
3213         found->bytes_readonly = 0;
3214         found->bytes_may_use = 0;
3215         found->full = 0;
3216         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3217         found->chunk_alloc = 0;
3218         found->flush = 0;
3219         init_waitqueue_head(&found->wait);
3220         *space_info = found;
3221         list_add_rcu(&found->list, &info->space_info);
3222         if (flags & BTRFS_BLOCK_GROUP_DATA)
3223                 info->data_sinfo = found;
3224         return 0;
3225 }
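/*
 * Editorial example: adding a 1GiB RAID1 block group accounts 1GiB of
 * logical space but 2GiB of raw disk, because factor is 2 for the
 * DUP/RAID1/RAID10 profiles:
 *
 *   update_space_info(info, BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1,
 *                     1ULL << 30, 0, &sinfo);
 *
 * now sinfo->total_bytes grew by 1GiB and sinfo->disk_total by 2GiB.
 */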
3226
3227 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3228 {
3229         u64 extra_flags = chunk_to_extended(flags) &
3230                                 BTRFS_EXTENDED_PROFILE_MASK;
3231
3232         write_seqlock(&fs_info->profiles_lock);
3233         if (flags & BTRFS_BLOCK_GROUP_DATA)
3234                 fs_info->avail_data_alloc_bits |= extra_flags;
3235         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3236                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3237         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3238                 fs_info->avail_system_alloc_bits |= extra_flags;
3239         write_sequnlock(&fs_info->profiles_lock);
3240 }
3241
3242 /*
3243  * returns target flags in extended format or 0 if restripe for this
3244  * chunk_type is not in progress
3245  *
3246  * should be called with either volume_mutex or balance_lock held
3247  */
3248 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3249 {
3250         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3251         u64 target = 0;
3252
3253         if (!bctl)
3254                 return 0;
3255
3256         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3257             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3258                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3259         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3260                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3261                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3262         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3263                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3264                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3265         }
3266
3267         return target;
3268 }
3269
3270 /*
3271  * @flags: available profiles in extended format (see ctree.h)
3272  *
3273  * Returns reduced profile in chunk format.  If profile changing is in
3274  * progress (either running or paused) picks the target profile (if it's
3275  * already available), otherwise falls back to plain reducing.
3276  */
3277 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3278 {
3279         /*
3280          * we add in the count of missing devices because we want
3281          * to make sure that any RAID levels on a degraded FS
3282          * continue to be honored.
3283          */
3284         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3285                 root->fs_info->fs_devices->missing_devices;
3286         u64 target;
3287
3288         /*
3289          * see if restripe for this chunk_type is in progress, if so
3290          * try to reduce to the target profile
3291          */
3292         spin_lock(&root->fs_info->balance_lock);
3293         target = get_restripe_target(root->fs_info, flags);
3294         if (target) {
3295                 /* pick target profile only if it's already available */
3296                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3297                         spin_unlock(&root->fs_info->balance_lock);
3298                         return extended_to_chunk(target);
3299                 }
3300         }
3301         spin_unlock(&root->fs_info->balance_lock);
3302
3303         if (num_devices == 1)
3304                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3305         if (num_devices < 4)
3306                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3307
3308         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3309             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3310                       BTRFS_BLOCK_GROUP_RAID10))) {
3311                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
3312         }
3313
3314         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3315             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3316                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3317         }
3318
3319         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3320             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3321              (flags & BTRFS_BLOCK_GROUP_RAID10) |
3322              (flags & BTRFS_BLOCK_GROUP_DUP))) {
3323                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3324         }
3325
3326         return extended_to_chunk(flags);
3327 }
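/*
 * Editorial example: on a filesystem with two rw devices and
 * flags = DATA | RAID10 | RAID1, RAID10 is stripped because num_devices
 * is below 4, RAID1 survives the remaining checks, and the function
 * returns DATA | RAID1 converted to chunk format.
 */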
3328
3329 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3330 {
3331         unsigned seq;
3332
3333         do {
3334                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3335
3336                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3337                         flags |= root->fs_info->avail_data_alloc_bits;
3338                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3339                         flags |= root->fs_info->avail_system_alloc_bits;
3340                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3341                         flags |= root->fs_info->avail_metadata_alloc_bits;
3342         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3343
3344         return btrfs_reduce_alloc_profile(root, flags);
3345 }
3346
3347 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3348 {
3349         u64 flags;
3350
3351         if (data)
3352                 flags = BTRFS_BLOCK_GROUP_DATA;
3353         else if (root == root->fs_info->chunk_root)
3354                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3355         else
3356                 flags = BTRFS_BLOCK_GROUP_METADATA;
3357
3358         return get_alloc_profile(root, flags);
3359 }
3360
3361 /*
3362  * This will check the space that the inode allocates from to make sure we have
3363  * enough space for bytes.
3364  */
3365 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3366 {
3367         struct btrfs_space_info *data_sinfo;
3368         struct btrfs_root *root = BTRFS_I(inode)->root;
3369         struct btrfs_fs_info *fs_info = root->fs_info;
3370         u64 used;
3371         int ret = 0, committed = 0, alloc_chunk = 1;
3372
3373         /* make sure bytes are sectorsize aligned */
3374         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
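        /* editorial example: bytes = 5000 with a 4KiB sectorsize rounds up to 8192 */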
3375
3376         if (root == root->fs_info->tree_root ||
3377             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3378                 alloc_chunk = 0;
3379                 committed = 1;
3380         }
3381
3382         data_sinfo = fs_info->data_sinfo;
3383         if (!data_sinfo)
3384                 goto alloc;
3385
3386 again:
3387         /* make sure we have enough space to handle the data first */
3388         spin_lock(&data_sinfo->lock);
3389         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3390                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3391                 data_sinfo->bytes_may_use;
3392
3393         if (used + bytes > data_sinfo->total_bytes) {
3394                 struct btrfs_trans_handle *trans;
3395
3396                 /*
3397                  * if we don't have enough free bytes in this space then we need
3398                  * to alloc a new chunk.
3399                  */
3400                 if (!data_sinfo->full && alloc_chunk) {
3401                         u64 alloc_target;
3402
3403                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3404                         spin_unlock(&data_sinfo->lock);
3405 alloc:
3406                         alloc_target = btrfs_get_alloc_profile(root, 1);
3407                         trans = btrfs_join_transaction(root);
3408                         if (IS_ERR(trans))
3409                                 return PTR_ERR(trans);
3410
3411                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3412                                              alloc_target,
3413                                              CHUNK_ALLOC_NO_FORCE);
3414                         btrfs_end_transaction(trans, root);
3415                         if (ret < 0) {
3416                                 if (ret != -ENOSPC)
3417                                         return ret;
3418                                 else
3419                                         goto commit_trans;
3420                         }
3421
3422                         if (!data_sinfo)
3423                                 data_sinfo = fs_info->data_sinfo;
3424
3425                         goto again;
3426                 }
3427
3428                 /*
3429                  * If we have less pinned bytes than we want to allocate then
3430                  * don't bother committing the transaction, it won't help us.
3431                  */
3432                 if (data_sinfo->bytes_pinned < bytes)
3433                         committed = 1;
3434                 spin_unlock(&data_sinfo->lock);
3435
3436                 /* commit the current transaction and try again */
3437 commit_trans:
3438                 if (!committed &&
3439                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3440                         committed = 1;
3441                         trans = btrfs_join_transaction(root);
3442                         if (IS_ERR(trans))
3443                                 return PTR_ERR(trans);
3444                         ret = btrfs_commit_transaction(trans, root);
3445                         if (ret)
3446                                 return ret;
3447                         goto again;
3448                 }
3449
3450                 return -ENOSPC;
3451         }
3452         data_sinfo->bytes_may_use += bytes;
3453         trace_btrfs_space_reservation(root->fs_info, "space_info",
3454                                       data_sinfo->flags, bytes, 1);
3455         spin_unlock(&data_sinfo->lock);
3456
3457         return 0;
3458 }
3459
3460 /*
3461  * Called if we need to clear a data reservation for this inode.
3462  */
3463 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3464 {
3465         struct btrfs_root *root = BTRFS_I(inode)->root;
3466         struct btrfs_space_info *data_sinfo;
3467
3468         /* make sure bytes are sectorsize aligned */
3469         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3470
3471         data_sinfo = root->fs_info->data_sinfo;
3472         spin_lock(&data_sinfo->lock);
3473         data_sinfo->bytes_may_use -= bytes;
3474         trace_btrfs_space_reservation(root->fs_info, "space_info",
3475                                       data_sinfo->flags, bytes, 0);
3476         spin_unlock(&data_sinfo->lock);
3477 }
3478
3479 static void force_metadata_allocation(struct btrfs_fs_info *info)
3480 {
3481         struct list_head *head = &info->space_info;
3482         struct btrfs_space_info *found;
3483
3484         rcu_read_lock();
3485         list_for_each_entry_rcu(found, head, list) {
3486                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3487                         found->force_alloc = CHUNK_ALLOC_FORCE;
3488         }
3489         rcu_read_unlock();
3490 }
3491
3492 static int should_alloc_chunk(struct btrfs_root *root,
3493                               struct btrfs_space_info *sinfo, int force)
3494 {
3495         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3496         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3497         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3498         u64 thresh;
3499
3500         if (force == CHUNK_ALLOC_FORCE)
3501                 return 1;
3502
3503         /*
3504          * We need to take into account the global rsv because for all intents
3505          * and purposes it's used space.  Don't worry about locking the
3506          * global_rsv, it doesn't change except when the transaction commits.
3507          */
3508         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3509                 num_allocated += global_rsv->size;
3510
3511         /*
3512          * in limited mode, we want to have some free space up to
3513          * about 1% of the FS size.
3514          */
3515         if (force == CHUNK_ALLOC_LIMITED) {
3516                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3517                 thresh = max_t(u64, 64 * 1024 * 1024,
3518                                div_factor_fine(thresh, 1));
3519
3520                 if (num_bytes - num_allocated < thresh)
3521                         return 1;
3522         }
3523
3524         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3525                 return 0;
3526         return 1;
3527 }
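/*
 * Editorial example: on a 100GiB filesystem, CHUNK_ALLOC_LIMITED computes
 * thresh = max(64MiB, 1% of 100GiB) = 1GiB, so a chunk is allocated while
 * less than 1GiB of headroom remains in this space_info.  Without force,
 * allocation only happens once num_allocated (plus 2MiB of slack) exceeds
 * 80% of num_bytes, i.e. div_factor(num_bytes, 8).
 */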
3528
3529 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3530 {
3531         u64 num_dev;
3532
3533         if (type & BTRFS_BLOCK_GROUP_RAID10 ||
3534             type & BTRFS_BLOCK_GROUP_RAID0)
3535                 num_dev = root->fs_info->fs_devices->rw_devices;
3536         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3537                 num_dev = 2;
3538         else
3539                 num_dev = 1;    /* DUP or single */
3540
3541         /* metadata for updating devices and chunk tree */
3542         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3543 }
3544
3545 static void check_system_chunk(struct btrfs_trans_handle *trans,
3546                                struct btrfs_root *root, u64 type)
3547 {
3548         struct btrfs_space_info *info;
3549         u64 left;
3550         u64 thresh;
3551
3552         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3553         spin_lock(&info->lock);
3554         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3555                 info->bytes_reserved - info->bytes_readonly;
3556         spin_unlock(&info->lock);
3557
3558         thresh = get_system_chunk_thresh(root, type);
3559         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3560                 printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
3561                        left, thresh, type);
3562                 dump_space_info(info, 0, 0);
3563         }
3564
3565         if (left < thresh) {
3566                 u64 flags;
3567
3568                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3569                 btrfs_alloc_chunk(trans, root, flags);
3570         }
3571 }
3572
3573 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3574                           struct btrfs_root *extent_root, u64 flags, int force)
3575 {
3576         struct btrfs_space_info *space_info;
3577         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3578         int wait_for_alloc = 0;
3579         int ret = 0;
3580
3581         /* Don't re-enter if we're already allocating a chunk */
3582         if (trans->allocating_chunk)
3583                 return -ENOSPC;
3584
3585         space_info = __find_space_info(extent_root->fs_info, flags);
3586         if (!space_info) {
3587                 ret = update_space_info(extent_root->fs_info, flags,
3588                                         0, 0, &space_info);
3589                 BUG_ON(ret); /* -ENOMEM */
3590         }
3591         BUG_ON(!space_info); /* Logic error */
3592
3593 again:
3594         spin_lock(&space_info->lock);
3595         if (force < space_info->force_alloc)
3596                 force = space_info->force_alloc;
3597         if (space_info->full) {
3598                 spin_unlock(&space_info->lock);
3599                 return 0;
3600         }
3601
3602         if (!should_alloc_chunk(extent_root, space_info, force)) {
3603                 spin_unlock(&space_info->lock);
3604                 return 0;
3605         } else if (space_info->chunk_alloc) {
3606                 wait_for_alloc = 1;
3607         } else {
3608                 space_info->chunk_alloc = 1;
3609         }
3610
3611         spin_unlock(&space_info->lock);
3612
3613         mutex_lock(&fs_info->chunk_mutex);
3614
3615         /*
3616          * The chunk_mutex is held throughout the entirety of a chunk
3617          * allocation, so once we've acquired the chunk_mutex we know that the
3618          * other guy is done and we need to recheck and see if we should
3619          * allocate.
3620          */
3621         if (wait_for_alloc) {
3622                 mutex_unlock(&fs_info->chunk_mutex);
3623                 wait_for_alloc = 0;
3624                 goto again;
3625         }
3626
3627         trans->allocating_chunk = true;
3628
3629         /*
3630          * If we have mixed data/metadata chunks we want to make sure we keep
3631          * allocating mixed chunks instead of individual chunks.
3632          */
3633         if (btrfs_mixed_space_info(space_info))
3634                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3635
3636         /*
3637          * if we're doing a data chunk, go ahead and make sure that
3638          * we keep a reasonable number of metadata chunks allocated in the
3639          * FS as well.
3640          */
3641         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3642                 fs_info->data_chunk_allocations++;
3643                 if (!(fs_info->data_chunk_allocations %
3644                       fs_info->metadata_ratio))
3645                         force_metadata_allocation(fs_info);
3646         }
3647
3648         /*
3649          * Check if we have enough space in SYSTEM chunk because we may need
3650          * to update devices.
3651          */
3652         check_system_chunk(trans, extent_root, flags);
3653
3654         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3655         trans->allocating_chunk = false;
3656         if (ret < 0 && ret != -ENOSPC)
3657                 goto out;
3658
3659         spin_lock(&space_info->lock);
3660         if (ret)
3661                 space_info->full = 1;
3662         else
3663                 ret = 1;
3664
3665         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3666         space_info->chunk_alloc = 0;
3667         spin_unlock(&space_info->lock);
3668 out:
3669         mutex_unlock(&fs_info->chunk_mutex);
3670         return ret;
3671 }
3672
3673 static int can_overcommit(struct btrfs_root *root,
3674                           struct btrfs_space_info *space_info, u64 bytes,
3675                           enum btrfs_reserve_flush_enum flush)
3676 {
3677         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3678         u64 profile = btrfs_get_alloc_profile(root, 0);
3679         u64 rsv_size = 0;
3680         u64 avail;
3681         u64 used;
3682         u64 to_add;
3683
3684         used = space_info->bytes_used + space_info->bytes_reserved +
3685                 space_info->bytes_pinned + space_info->bytes_readonly;
3686
3687         spin_lock(&global_rsv->lock);
3688         rsv_size = global_rsv->size;
3689         spin_unlock(&global_rsv->lock);
3690
3691         /*
3692          * We only want to allow overcommitting if we have lots of actual space
3693          * free, but if we don't have enough space to handle the global reserve
3694          * space then we could end up having a real enospc problem when trying
3695          * to allocate a chunk or some other such important allocation.
3696          */
3697         rsv_size <<= 1;
3698         if (used + rsv_size >= space_info->total_bytes)
3699                 return 0;
3700
3701         used += space_info->bytes_may_use;
3702
3703         spin_lock(&root->fs_info->free_chunk_lock);
3704         avail = root->fs_info->free_chunk_space;
3705         spin_unlock(&root->fs_info->free_chunk_lock);
3706
3707         /*
3708          * If we have dup, raid1 or raid10 then only half of the free
3709          * space is actually usable.
3710          */
3711         if (profile & (BTRFS_BLOCK_GROUP_DUP |
3712                        BTRFS_BLOCK_GROUP_RAID1 |
3713                        BTRFS_BLOCK_GROUP_RAID10))
3714                 avail >>= 1;
3715
3716         to_add = space_info->total_bytes;
3717
3718         /*
3719          * If we aren't flushing all things, let us overcommit up to
3720          * 1/2 of the space.  If we can flush everything, don't let it
3721          * overcommit more than 1/8 of the space.
3722          */
3723         if (flush == BTRFS_RESERVE_FLUSH_ALL)
3724                 to_add >>= 3;
3725         else
3726                 to_add >>= 1;
3727
3728         /*
3729          * Limit the overcommit to the amount of free space we could possibly
3730          * allocate for chunks.
3731          */
3732         to_add = min(avail, to_add);
3733
3734         if (used + bytes < space_info->total_bytes + to_add)
3735                 return 1;
3736         return 0;
3737 }
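/*
 * Editorial example, assuming the doubled global rsv already fits below
 * total_bytes: with total_bytes = 10GiB, used = 9GiB (bytes_may_use
 * included), 4GiB of unallocated chunk space and a RAID1 profile (avail
 * halved to 2GiB), BTRFS_RESERVE_FLUSH_ALL gives
 * to_add = min(2GiB, 10GiB >> 3) = 1.25GiB, so a reservation succeeds as
 * long as used + bytes stays under 11.25GiB.
 */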
3738
3739 static inline int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
3740                                                       unsigned long nr_pages,
3741                                                       enum wb_reason reason)
3742 {
3743         /* the flusher is dealing with the dirty inodes now. */
3744         if (writeback_in_progress(sb->s_bdi))
3745                 return 1;
3746
3747         if (down_read_trylock(&sb->s_umount)) {
3748                 writeback_inodes_sb_nr(sb, nr_pages, reason);
3749                 up_read(&sb->s_umount);
3750                 return 1;
3751         }
3752
3753         return 0;
3754 }
3755
3756 void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3757                                   unsigned long nr_pages)
3758 {
3759         struct super_block *sb = root->fs_info->sb;
3760         int started;
3761
3762         /* If we cannot start writeback, just sync all the delalloc files. */
3763         started = writeback_inodes_sb_nr_if_idle_safe(sb, nr_pages,
3764                                                       WB_REASON_FS_FREE_SPACE);
3765         if (!started) {
3766                 /*
3767                  * We needn't worry about the filesystem going from r/w to
3768                  * r/o even though we don't acquire the ->s_umount mutex,
3769                  * because the filesystem should guarantee that the delalloc
3770                  * inode list is empty once it is read-only (all dirty
3771                  * pages have been written to disk).
3772                  */
3773                 btrfs_start_delalloc_inodes(root, 0);
3774                 btrfs_wait_ordered_extents(root, 0);
3775         }
3776 }
3777
3778 /*
3779  * shrink metadata reservation for delalloc
3780  */
3781 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
3782                             bool wait_ordered)
3783 {
3784         struct btrfs_block_rsv *block_rsv;
3785         struct btrfs_space_info *space_info;
3786         struct btrfs_trans_handle *trans;
3787         u64 delalloc_bytes;
3788         u64 max_reclaim;
3789         long time_left;
3790         unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3791         int loops = 0;
3792         enum btrfs_reserve_flush_enum flush;
3793
3794         trans = (struct btrfs_trans_handle *)current->journal_info;
3795         block_rsv = &root->fs_info->delalloc_block_rsv;
3796         space_info = block_rsv->space_info;
3797
3798         smp_mb();
3799         delalloc_bytes = percpu_counter_sum_positive(
3800                                                 &root->fs_info->delalloc_bytes);
3801         if (delalloc_bytes == 0) {
3802                 if (trans)
3803                         return;
3804                 btrfs_wait_ordered_extents(root, 0);
3805                 return;
3806         }
3807
3808         while (delalloc_bytes && loops < 3) {
3809                 max_reclaim = min(delalloc_bytes, to_reclaim);
3810                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
3811                 btrfs_writeback_inodes_sb_nr(root, nr_pages);
3812                 /*
3813                  * We need to wait for the async pages to actually start before
3814                  * we do anything.
3815                  */
3816                 wait_event(root->fs_info->async_submit_wait,
3817                            !atomic_read(&root->fs_info->async_delalloc_pages));
3818
3819                 if (!trans)
3820                         flush = BTRFS_RESERVE_FLUSH_ALL;
3821                 else
3822                         flush = BTRFS_RESERVE_NO_FLUSH;
3823                 spin_lock(&space_info->lock);
3824                 if (can_overcommit(root, space_info, orig, flush)) {
3825                         spin_unlock(&space_info->lock);
3826                         break;
3827                 }
3828                 spin_unlock(&space_info->lock);
3829
3830                 loops++;
3831                 if (wait_ordered && !trans) {
3832                         btrfs_wait_ordered_extents(root, 0);
3833                 } else {
3834                         time_left = schedule_timeout_killable(1);
3835                         if (time_left)
3836                                 break;
3837                 }
3838                 smp_mb();
3839                 delalloc_bytes = percpu_counter_sum_positive(
3840                                                 &root->fs_info->delalloc_bytes);
3841         }
3842 }
3843
3844 /**
3845  * may_commit_transaction - possibly commit the transaction if it's ok to
3846  * @root - the root we're allocating for
3847  * @bytes - the number of bytes we want to reserve
3848  * @force - force the commit
3849  *
3850  * This will check to make sure that committing the transaction will actually
3851  * get us somewhere and then commit the transaction if it does.  Otherwise it
3852  * will return -ENOSPC.
3853  */
3854 static int may_commit_transaction(struct btrfs_root *root,
3855                                   struct btrfs_space_info *space_info,
3856                                   u64 bytes, int force)
3857 {
3858         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
3859         struct btrfs_trans_handle *trans;
3860
3861         trans = (struct btrfs_trans_handle *)current->journal_info;
3862         if (trans)
3863                 return -EAGAIN;
3864
3865         if (force)
3866                 goto commit;
3867
3868         /* See if there is enough pinned space to make this reservation */
3869         spin_lock(&space_info->lock);
3870         if (space_info->bytes_pinned >= bytes) {
3871                 spin_unlock(&space_info->lock);
3872                 goto commit;
3873         }
3874         spin_unlock(&space_info->lock);
3875
3876         /*
3877          * See if there is some space in the delayed insertion reservation for
3878          * this reservation.
3879          */
3880         if (space_info != delayed_rsv->space_info)
3881                 return -ENOSPC;
3882
3883         spin_lock(&space_info->lock);
3884         spin_lock(&delayed_rsv->lock);
3885         if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
3886                 spin_unlock(&delayed_rsv->lock);
3887                 spin_unlock(&space_info->lock);
3888                 return -ENOSPC;
3889         }
3890         spin_unlock(&delayed_rsv->lock);
3891         spin_unlock(&space_info->lock);
3892
3893 commit:
3894         trans = btrfs_join_transaction(root);
3895         if (IS_ERR(trans))
3896                 return -ENOSPC;
3897
3898         return btrfs_commit_transaction(trans, root);
3899 }
3900
3901 enum flush_state {
3902         FLUSH_DELAYED_ITEMS_NR  =       1,
3903         FLUSH_DELAYED_ITEMS     =       2,
3904         FLUSH_DELALLOC          =       3,
3905         FLUSH_DELALLOC_WAIT     =       4,
3906         ALLOC_CHUNK             =       5,
3907         COMMIT_TRANS            =       6,
3908 };
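/*
 * Editorial note: the states above are tried in ascending order of cost by
 * reserve_metadata_bytes(), from running a handful of delayed items up to
 * committing the whole transaction.
 */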
3909
3910 static int flush_space(struct btrfs_root *root,
3911                        struct btrfs_space_info *space_info, u64 num_bytes,
3912                        u64 orig_bytes, int state)
3913 {
3914         struct btrfs_trans_handle *trans;
3915         int nr;
3916         int ret = 0;
3917
3918         switch (state) {
3919         case FLUSH_DELAYED_ITEMS_NR:
3920         case FLUSH_DELAYED_ITEMS:
3921                 if (state == FLUSH_DELAYED_ITEMS_NR) {
3922                         u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
3923
3924                         nr = (int)div64_u64(num_bytes, bytes);
3925                         if (!nr)
3926                                 nr = 1;
3927                         nr *= 2;
3928                 } else {
3929                         nr = -1;
3930                 }
3931                 trans = btrfs_join_transaction(root);
3932                 if (IS_ERR(trans)) {
3933                         ret = PTR_ERR(trans);
3934                         break;
3935                 }
3936                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
3937                 btrfs_end_transaction(trans, root);
3938                 break;
3939         case FLUSH_DELALLOC:
3940         case FLUSH_DELALLOC_WAIT:
3941                 shrink_delalloc(root, num_bytes, orig_bytes,
3942                                 state == FLUSH_DELALLOC_WAIT);
3943                 break;
3944         case ALLOC_CHUNK:
3945                 trans = btrfs_join_transaction(root);
3946                 if (IS_ERR(trans)) {
3947                         ret = PTR_ERR(trans);
3948                         break;
3949                 }
3950                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3951                                      btrfs_get_alloc_profile(root, 0),
3952                                      CHUNK_ALLOC_NO_FORCE);
3953                 btrfs_end_transaction(trans, root);
3954                 if (ret == -ENOSPC)
3955                         ret = 0;
3956                 break;
3957         case COMMIT_TRANS:
3958                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
3959                 break;
3960         default:
3961                 ret = -ENOSPC;
3962                 break;
3963         }
3964
3965         return ret;
3966 }
3967 /**
3968  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
3969  * @root - the root we're allocating for
3970  * @block_rsv - the block_rsv we're allocating for
3971  * @orig_bytes - the number of bytes we want
3972  * @flush - whether or not we can flush to make our reservation
3973  *
3974  * This will reserve orig_bytes number of bytes from the space info associated
3975  * with the block_rsv.  If there is not enough space it will make an attempt to
3976  * flush out space to make room.  It will do this by flushing delalloc if
3977  * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
3978  * then no attempt to regain reservations will be made and this will fail if
3979  * there is not enough space already.
3980  */
3981 static int reserve_metadata_bytes(struct btrfs_root *root,
3982                                   struct btrfs_block_rsv *block_rsv,
3983                                   u64 orig_bytes,
3984                                   enum btrfs_reserve_flush_enum flush)
3985 {
3986         struct btrfs_space_info *space_info = block_rsv->space_info;
3987         u64 used;
3988         u64 num_bytes = orig_bytes;
3989         int flush_state = FLUSH_DELAYED_ITEMS_NR;
3990         int ret = 0;
3991         bool flushing = false;
3992
3993 again:
3994         ret = 0;
3995         spin_lock(&space_info->lock);
3996         /*
3997          * We only want to wait if somebody other than us is flushing and we
3998          * are actually allowed to flush all things.
3999          */
4000         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4001                space_info->flush) {
4002                 spin_unlock(&space_info->lock);
4003                 /*
4004                  * If we have a trans handle we can't wait because the flusher
4005                  * may have to commit the transaction, which would mean we would
4006                  * deadlock since we are waiting for the flusher to finish, but
4007                  * hold the current transaction open.
4008                  */
4009                 if (current->journal_info)
4010                         return -EAGAIN;
4011                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4012                 /* Must have been killed, return */
4013                 if (ret)
4014                         return -EINTR;
4015
4016                 spin_lock(&space_info->lock);
4017         }
4018
4019         ret = -ENOSPC;
4020         used = space_info->bytes_used + space_info->bytes_reserved +
4021                 space_info->bytes_pinned + space_info->bytes_readonly +
4022                 space_info->bytes_may_use;
4023
4024         /*
4025          * The idea here is that if we've not already over-reserved the
4026          * block group then we can go ahead and save our reservation first
4027          * and then start flushing if we need to.  Otherwise if we've
4028          * already overcommitted, let's start flushing stuff first and
4029          * then come back and try to make our reservation.
4030          */
4031         if (used <= space_info->total_bytes) {
4032                 if (used + orig_bytes <= space_info->total_bytes) {
4033                         space_info->bytes_may_use += orig_bytes;
4034                         trace_btrfs_space_reservation(root->fs_info,
4035                                 "space_info", space_info->flags, orig_bytes, 1);
4036                         ret = 0;
4037                 } else {
4038                         /*
4039                          * Ok set num_bytes to orig_bytes since we aren't
4040                          * overcommitted; this way we only try to reclaim what
4041                          * we need.
4042                          */
4043                         num_bytes = orig_bytes;
4044                 }
4045         } else {
4046                 /*
4047                  * Ok we're over committed, set num_bytes to the overcommitted
4048                  * amount plus the amount of bytes that we need for this
4049                  * reservation.
4050                  */
4051                 num_bytes = used - space_info->total_bytes +
4052                         (orig_bytes * 2);
4053         }
4054
4055         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4056                 space_info->bytes_may_use += orig_bytes;
4057                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4058                                               space_info->flags, orig_bytes,
4059                                               1);
4060                 ret = 0;
4061         }
4062
4063         /*
4064          * Couldn't make our reservation, save our place so while we're trying
4065          * to reclaim space we can actually use it instead of somebody else
4066          * stealing it from us.
4067          *
4068          * We make the other tasks wait for the flush only when we can flush
4069          * all things.
4070          */
4071         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4072                 flushing = true;
4073                 space_info->flush = 1;
4074         }
4075
4076         spin_unlock(&space_info->lock);
4077
4078         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4079                 goto out;
4080
4081         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4082                           flush_state);
4083         flush_state++;
4084
4085         /*
4086          * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
4087          * would occur.  So skip the delalloc flush states.
4088          */
4089         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4090             (flush_state == FLUSH_DELALLOC ||
4091              flush_state == FLUSH_DELALLOC_WAIT))
4092                 flush_state = ALLOC_CHUNK;
4093
4094         if (!ret)
4095                 goto again;
4096         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4097                  flush_state < COMMIT_TRANS)
4098                 goto again;
4099         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4100                  flush_state <= COMMIT_TRANS)
4101                 goto again;
4102
4103 out:
4104         if (ret == -ENOSPC &&
4105             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4106                 struct btrfs_block_rsv *global_rsv =
4107                         &root->fs_info->global_block_rsv;
4108
4109                 if (block_rsv != global_rsv &&
4110                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4111                         ret = 0;
4112         }
4113         if (flushing) {
4114                 spin_lock(&space_info->lock);
4115                 space_info->flush = 0;
4116                 wake_up_all(&space_info->wait);
4117                 spin_unlock(&space_info->lock);
4118         }
4119         return ret;
4120 }
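
/*
 * Illustrative sketch (comment only, not compiled): callers such as
 * btrfs_block_rsv_add() below drive the reservation above like this; on
 * -ENOSPC the retry loop has already walked the flush states (up to and
 * including COMMIT_TRANS for BTRFS_RESERVE_FLUSH_ALL) without freeing
 * enough space.
 *
 *      ret = reserve_metadata_bytes(root, block_rsv, num_bytes,
 *                                   BTRFS_RESERVE_FLUSH_ALL);
 *      if (!ret)
 *              block_rsv_add_bytes(block_rsv, num_bytes, 1);
 */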
4121
4122 static struct btrfs_block_rsv *get_block_rsv(
4123                                         const struct btrfs_trans_handle *trans,
4124                                         const struct btrfs_root *root)
4125 {
4126         struct btrfs_block_rsv *block_rsv = NULL;
4127
4128         if (root->ref_cows)
4129                 block_rsv = trans->block_rsv;
4130
4131         if (root == root->fs_info->csum_root && trans->adding_csums)
4132                 block_rsv = trans->block_rsv;
4133
4134         if (!block_rsv)
4135                 block_rsv = root->block_rsv;
4136
4137         if (!block_rsv)
4138                 block_rsv = &root->fs_info->empty_block_rsv;
4139
4140         return block_rsv;
4141 }
4142
4143 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4144                                u64 num_bytes)
4145 {
4146         int ret = -ENOSPC;
4147         spin_lock(&block_rsv->lock);
4148         if (block_rsv->reserved >= num_bytes) {
4149                 block_rsv->reserved -= num_bytes;
4150                 if (block_rsv->reserved < block_rsv->size)
4151                         block_rsv->full = 0;
4152                 ret = 0;
4153         }
4154         spin_unlock(&block_rsv->lock);
4155         return ret;
4156 }
4157
4158 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4159                                 u64 num_bytes, int update_size)
4160 {
4161         spin_lock(&block_rsv->lock);
4162         block_rsv->reserved += num_bytes;
4163         if (update_size)
4164                 block_rsv->size += num_bytes;
4165         else if (block_rsv->reserved >= block_rsv->size)
4166                 block_rsv->full = 1;
4167         spin_unlock(&block_rsv->lock);
4168 }
4169
4170 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4171                                     struct btrfs_block_rsv *block_rsv,
4172                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4173 {
4174         struct btrfs_space_info *space_info = block_rsv->space_info;
4175
4176         spin_lock(&block_rsv->lock);
4177         if (num_bytes == (u64)-1)
4178                 num_bytes = block_rsv->size;
4179         block_rsv->size -= num_bytes;
4180         if (block_rsv->reserved >= block_rsv->size) {
4181                 num_bytes = block_rsv->reserved - block_rsv->size;
4182                 block_rsv->reserved = block_rsv->size;
4183                 block_rsv->full = 1;
4184         } else {
4185                 num_bytes = 0;
4186         }
4187         spin_unlock(&block_rsv->lock);
4188
4189         if (num_bytes > 0) {
4190                 if (dest) {
4191                         spin_lock(&dest->lock);
4192                         if (!dest->full) {
4193                                 u64 bytes_to_add;
4194
4195                                 bytes_to_add = dest->size - dest->reserved;
4196                                 bytes_to_add = min(num_bytes, bytes_to_add);
4197                                 dest->reserved += bytes_to_add;
4198                                 if (dest->reserved >= dest->size)
4199                                         dest->full = 1;
4200                                 num_bytes -= bytes_to_add;
4201                         }
4202                         spin_unlock(&dest->lock);
4203                 }
4204                 if (num_bytes) {
4205                         spin_lock(&space_info->lock);
4206                         space_info->bytes_may_use -= num_bytes;
4207                         trace_btrfs_space_reservation(fs_info, "space_info",
4208                                         space_info->flags, num_bytes, 0);
4209                         space_info->reservation_progress++;
4210                         spin_unlock(&space_info->lock);
4211                 }
4212         }
4213 }
4214
4215 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4216                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4217 {
4218         int ret;
4219
4220         ret = block_rsv_use_bytes(src, num_bytes);
4221         if (ret)
4222                 return ret;
4223
4224         block_rsv_add_bytes(dst, num_bytes, 1);
4225         return 0;
4226 }
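
/*
 * Worked example (sketch): migration is all-or-nothing.  If src holds at
 * least num_bytes reserved, that many bytes move to dst and dst->size grows
 * by the same amount (update_size == 1); otherwise -ENOSPC comes back and
 * neither reserve is touched.
 */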
4227
4228 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4229 {
4230         memset(rsv, 0, sizeof(*rsv));
4231         spin_lock_init(&rsv->lock);
4232         rsv->type = type;
4233 }
4234
4235 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4236                                               unsigned short type)
4237 {
4238         struct btrfs_block_rsv *block_rsv;
4239         struct btrfs_fs_info *fs_info = root->fs_info;
4240
4241         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4242         if (!block_rsv)
4243                 return NULL;
4244
4245         btrfs_init_block_rsv(block_rsv, type);
4246         block_rsv->space_info = __find_space_info(fs_info,
4247                                                   BTRFS_BLOCK_GROUP_METADATA);
4248         return block_rsv;
4249 }
4250
4251 void btrfs_free_block_rsv(struct btrfs_root *root,
4252                           struct btrfs_block_rsv *rsv)
4253 {
4254         if (!rsv)
4255                 return;
4256         btrfs_block_rsv_release(root, rsv, (u64)-1);
4257         kfree(rsv);
4258 }
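
/*
 * Minimal usage sketch (illustrative, not part of the build).  The
 * BTRFS_BLOCK_RSV_TEMP type and the surrounding error handling are
 * assumptions for the example; btrfs_free_block_rsv() releases whatever
 * is still reserved.
 *
 *      struct btrfs_block_rsv *rsv;
 *      int ret;
 *
 *      rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 *      if (!rsv)
 *              return -ENOMEM;
 *      ret = btrfs_block_rsv_add(root, rsv, num_bytes,
 *                                BTRFS_RESERVE_FLUSH_ALL);
 *      if (!ret) {
 *              ... consume the reservation ...
 *      }
 *      btrfs_free_block_rsv(root, rsv);
 */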
4259
4260 int btrfs_block_rsv_add(struct btrfs_root *root,
4261                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4262                         enum btrfs_reserve_flush_enum flush)
4263 {
4264         int ret;
4265
4266         if (num_bytes == 0)
4267                 return 0;
4268
4269         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4270         if (!ret) {
4271                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4272                 return 0;
4273         }
4274
4275         return ret;
4276 }
4277
4278 int btrfs_block_rsv_check(struct btrfs_root *root,
4279                           struct btrfs_block_rsv *block_rsv, int min_factor)
4280 {
4281         u64 num_bytes = 0;
4282         int ret = -ENOSPC;
4283
4284         if (!block_rsv)
4285                 return 0;
4286
4287         spin_lock(&block_rsv->lock);
4288         num_bytes = div_factor(block_rsv->size, min_factor);
4289         if (block_rsv->reserved >= num_bytes)
4290                 ret = 0;
4291         spin_unlock(&block_rsv->lock);
4292
4293         return ret;
4294 }
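
/*
 * Worked example (assuming div_factor() scales by min_factor / 10, as
 * elsewhere in btrfs): btrfs_block_rsv_check(root, rsv, 5) returns 0 only
 * if at least half of rsv->size is currently reserved, and -ENOSPC
 * otherwise.
 */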
4295
4296 int btrfs_block_rsv_refill(struct btrfs_root *root,
4297                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4298                            enum btrfs_reserve_flush_enum flush)
4299 {
4300         u64 num_bytes = 0;
4301         int ret = -ENOSPC;
4302
4303         if (!block_rsv)
4304                 return 0;
4305
4306         spin_lock(&block_rsv->lock);
4307         num_bytes = min_reserved;
4308         if (block_rsv->reserved >= num_bytes)
4309                 ret = 0;
4310         else
4311                 num_bytes -= block_rsv->reserved;
4312         spin_unlock(&block_rsv->lock);
4313
4314         if (!ret)
4315                 return 0;
4316
4317         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4318         if (!ret) {
4319                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4320                 return 0;
4321         }
4322
4323         return ret;
4324 }
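
/*
 * Worked example (sketch): with block_rsv->reserved == 1M and
 * min_reserved == 4M, the function asks reserve_metadata_bytes() for the
 * missing 3M and, on success, adds it with update_size == 0 so that
 * block_rsv->size stays as it was; the rsv is only topped back up.
 */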
4325
4326 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4327                             struct btrfs_block_rsv *dst_rsv,
4328                             u64 num_bytes)
4329 {
4330         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4331 }
4332
4333 void btrfs_block_rsv_release(struct btrfs_root *root,
4334                              struct btrfs_block_rsv *block_rsv,
4335                              u64 num_bytes)
4336 {
4337         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4338         if (global_rsv->full || global_rsv == block_rsv ||
4339             block_rsv->space_info != global_rsv->space_info)
4340                 global_rsv = NULL;
4341         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4342                                 num_bytes);
4343 }
4344
4345 /*
4346  * Helper to calculate the size of the global block reservation.
4347  * The desired value is the sum of the space used by the extent tree,
4348  * checksum tree and root tree.
4349  */
4350 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4351 {
4352         struct btrfs_space_info *sinfo;
4353         u64 num_bytes;
4354         u64 meta_used;
4355         u64 data_used;
4356         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4357
4358         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4359         spin_lock(&sinfo->lock);
4360         data_used = sinfo->bytes_used;
4361         spin_unlock(&sinfo->lock);
4362
4363         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4364         spin_lock(&sinfo->lock);
4365         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4366                 data_used = 0;
4367         meta_used = sinfo->bytes_used;
4368         spin_unlock(&sinfo->lock);
4369
4370         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4371                     csum_size * 2;
4372         num_bytes += div64_u64(data_used + meta_used, 50);
4373
4374         if (num_bytes * 3 > meta_used)
4375                 num_bytes = div64_u64(meta_used, 3);
4376
4377         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4378 }
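
/*
 * Worked example (approximate, assuming 4K blocks and 4-byte crc32c csums):
 * with data_used = 100G and meta_used = 10G, the csum term is
 * (100G >> 12) * 4 * 2 ~= 200M and the 2% term is 110G / 50 ~= 2.2G,
 * giving ~2.4G.  The meta_used / 3 clamp (~3.3G) does not kick in here.
 */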
4379
4380 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4381 {
4382         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4383         struct btrfs_space_info *sinfo = block_rsv->space_info;
4384         u64 num_bytes;
4385
4386         num_bytes = calc_global_metadata_size(fs_info);
4387
4388         spin_lock(&sinfo->lock);
4389         spin_lock(&block_rsv->lock);
4390
4391         block_rsv->size = num_bytes;
4392
4393         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4394                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4395                     sinfo->bytes_may_use;
4396
4397         if (sinfo->total_bytes > num_bytes) {
4398                 num_bytes = sinfo->total_bytes - num_bytes;
4399                 block_rsv->reserved += num_bytes;
4400                 sinfo->bytes_may_use += num_bytes;
4401                 trace_btrfs_space_reservation(fs_info, "space_info",
4402                                       sinfo->flags, num_bytes, 1);
4403         }
4404
4405         if (block_rsv->reserved >= block_rsv->size) {
4406                 num_bytes = block_rsv->reserved - block_rsv->size;
4407                 sinfo->bytes_may_use -= num_bytes;
4408                 trace_btrfs_space_reservation(fs_info, "space_info",
4409                                       sinfo->flags, num_bytes, 0);
4410                 sinfo->reservation_progress++;
4411                 block_rsv->reserved = block_rsv->size;
4412                 block_rsv->full = 1;
4413         }
4414
4415         spin_unlock(&block_rsv->lock);
4416         spin_unlock(&sinfo->lock);
4417 }
4418
4419 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4420 {
4421         struct btrfs_space_info *space_info;
4422
4423         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4424         fs_info->chunk_block_rsv.space_info = space_info;
4425
4426         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4427         fs_info->global_block_rsv.space_info = space_info;
4428         fs_info->delalloc_block_rsv.space_info = space_info;
4429         fs_info->trans_block_rsv.space_info = space_info;
4430         fs_info->empty_block_rsv.space_info = space_info;
4431         fs_info->delayed_block_rsv.space_info = space_info;
4432
4433         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4434         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4435         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4436         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4437         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4438
4439         update_global_block_rsv(fs_info);
4440 }
4441
4442 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4443 {
4444         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4445                                 (u64)-1);
4446         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4447         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4448         WARN_ON(fs_info->trans_block_rsv.size > 0);
4449         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4450         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4451         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4452         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4453         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4454 }
4455
4456 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4457                                   struct btrfs_root *root)
4458 {
4459         if (!trans->block_rsv)
4460                 return;
4461
4462         if (!trans->bytes_reserved)
4463                 return;
4464
4465         trace_btrfs_space_reservation(root->fs_info, "transaction",
4466                                       trans->transid, trans->bytes_reserved, 0);
4467         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4468         trans->bytes_reserved = 0;
4469 }
4470
4471 /* Can only return 0 or -ENOSPC */
4472 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4473                                   struct inode *inode)
4474 {
4475         struct btrfs_root *root = BTRFS_I(inode)->root;
4476         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4477         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4478
4479         /*
4480          * We need to hold space in order to delete our orphan item once we've
4481          * added it, so this takes the reservation up front; we release it later,
4482          * when we are truly done with the orphan item.
4483          */
4484         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4485         trace_btrfs_space_reservation(root->fs_info, "orphan",
4486                                       btrfs_ino(inode), num_bytes, 1);
4487         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4488 }
4489
4490 void btrfs_orphan_release_metadata(struct inode *inode)
4491 {
4492         struct btrfs_root *root = BTRFS_I(inode)->root;
4493         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4494         trace_btrfs_space_reservation(root->fs_info, "orphan",
4495                                       btrfs_ino(inode), num_bytes, 0);
4496         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4497 }
4498
4499 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4500                                 struct btrfs_pending_snapshot *pending)
4501 {
4502         struct btrfs_root *root = pending->root;
4503         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4504         struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
4505         /*
4506          * two for root back/forward refs, two for directory entries,
4507          * one for root of the snapshot and one for parent inode.
4508          */
4509         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 6);
4510         dst_rsv->space_info = src_rsv->space_info;
4511         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4512 }
4513
4514 /**
4515  * drop_outstanding_extent - drop an outstanding extent
4516  * @inode: the inode we're dropping the extent for
4517  *
4518  * This is called when we are freeing up an outstanding extent, either called
4519  * after an error or after an extent is written.  This will return the number of
4520  * reserved extents that need to be freed.  This must be called with
4521  * BTRFS_I(inode)->lock held.
4522  */
4523 static unsigned drop_outstanding_extent(struct inode *inode)
4524 {
4525         unsigned drop_inode_space = 0;
4526         unsigned dropped_extents = 0;
4527
4528         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4529         BTRFS_I(inode)->outstanding_extents--;
4530
4531         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4532             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4533                                &BTRFS_I(inode)->runtime_flags))
4534                 drop_inode_space = 1;
4535
4536         /*
4537          * If we have at least as many outstanding extents as we have reserved
4538          * then we need to leave the reserved extents count alone.
4539          */
4540         if (BTRFS_I(inode)->outstanding_extents >=
4541             BTRFS_I(inode)->reserved_extents)
4542                 return drop_inode_space;
4543
4544         dropped_extents = BTRFS_I(inode)->reserved_extents -
4545                 BTRFS_I(inode)->outstanding_extents;
4546         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4547         return dropped_extents + drop_inode_space;
4548 }
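
/*
 * Worked example: with reserved_extents == 5 and outstanding_extents
 * dropping from 3 to 2, dropped_extents is 5 - 2 = 3 and reserved_extents
 * becomes 2; one more is returned if this call also cleared the
 * BTRFS_INODE_DELALLOC_META_RESERVED inode-update reservation.
 */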
4549
4550 /**
4551  * calc_csum_metadata_size - return the amount of metadata space that must
4552  *      be reserved/freed for the given bytes.
4553  * @inode: the inode we're manipulating
4554  * @num_bytes: the number of bytes in question
4555  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4556  *
4557  * This adjusts the number of csum_bytes in the inode and then returns the
4558  * correct amount of metadata that must either be reserved or freed.  We
4559  * calculate how many checksums we can fit into one leaf and then divide the
4560  * number of bytes that will need to be checksummed by this value to figure out
4561  * how many checksums will be required.  If we are adding bytes then the number
4562  * may go up and we will return the number of additional bytes that must be
4563  * reserved.  If it is going down we will return the number of bytes that must
4564  * be freed.
4565  *
4566  * This must be called with BTRFS_I(inode)->lock held.
4567  */
4568 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4569                                    int reserve)
4570 {
4571         struct btrfs_root *root = BTRFS_I(inode)->root;
4572         u64 csum_size;
4573         int num_csums_per_leaf;
4574         int num_csums;
4575         int old_csums;
4576
4577         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4578             BTRFS_I(inode)->csum_bytes == 0)
4579                 return 0;
4580
4581         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4582         if (reserve)
4583                 BTRFS_I(inode)->csum_bytes += num_bytes;
4584         else
4585                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4586         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4587         num_csums_per_leaf = (int)div64_u64(csum_size,
4588                                             sizeof(struct btrfs_csum_item) +
4589                                             sizeof(struct btrfs_disk_key));
4590         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4591         num_csums = num_csums + num_csums_per_leaf - 1;
4592         num_csums = num_csums / num_csums_per_leaf;
4593
4594         old_csums = old_csums + num_csums_per_leaf - 1;
4595         old_csums = old_csums / num_csums_per_leaf;
4596
4597         /* No change, no need to reserve more */
4598         if (old_csums == num_csums)
4599                 return 0;
4600
4601         if (reserve)
4602                 return btrfs_calc_trans_metadata_size(root,
4603                                                       num_csums - old_csums);
4604
4605         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4606 }
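
/*
 * Worked example (sketch): with a 4K sectorsize, growing csum_bytes from 0
 * to 1M means 256 checksums.  If num_csums_per_leaf were, say, 200, then
 * old_csums works out to 0 leaves and num_csums to 2, so space for two
 * extra tree items is reserved via btrfs_calc_trans_metadata_size().
 */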
4607
4608 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4609 {
4610         struct btrfs_root *root = BTRFS_I(inode)->root;
4611         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4612         u64 to_reserve = 0;
4613         u64 csum_bytes;
4614         unsigned nr_extents = 0;
4615         int extra_reserve = 0;
4616         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4617         int ret = 0;
4618         bool delalloc_lock = true;
4619
4620         /* If we are a free space inode we need to not flush since we will be in
4621          * the middle of a transaction commit.  We also don't need the delalloc
4622          * mutex since we won't race with anybody.  We need this mostly to make
4623          * lockdep shut its filthy mouth.
4624          */
4625         if (btrfs_is_free_space_inode(inode)) {
4626                 flush = BTRFS_RESERVE_NO_FLUSH;
4627                 delalloc_lock = false;
4628         }
4629
4630         if (flush != BTRFS_RESERVE_NO_FLUSH &&
4631             btrfs_transaction_in_commit(root->fs_info))
4632                 schedule_timeout(1);
4633
4634         if (delalloc_lock)
4635                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4636
4637         num_bytes = ALIGN(num_bytes, root->sectorsize);
4638
4639         spin_lock(&BTRFS_I(inode)->lock);
4640         BTRFS_I(inode)->outstanding_extents++;
4641
4642         if (BTRFS_I(inode)->outstanding_extents >
4643             BTRFS_I(inode)->reserved_extents)
4644                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4645                         BTRFS_I(inode)->reserved_extents;
4646
4647         /*
4648          * Add an item to reserve for updating the inode when we complete the
4649          * delalloc io.
4650          */
4651         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4652                       &BTRFS_I(inode)->runtime_flags)) {
4653                 nr_extents++;
4654                 extra_reserve = 1;
4655         }
4656
4657         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4658         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4659         csum_bytes = BTRFS_I(inode)->csum_bytes;
4660         spin_unlock(&BTRFS_I(inode)->lock);
4661
4662         if (root->fs_info->quota_enabled)
4663                 ret = btrfs_qgroup_reserve(root, num_bytes +
4664                                            nr_extents * root->leafsize);
4665
4666         /*
4667          * ret != 0 here means the qgroup reservation failed, so we go straight
4668          * to the shared error handling.
4669          */
4670         if (ret == 0)
4671                 ret = reserve_metadata_bytes(root, block_rsv,
4672                                              to_reserve, flush);
4673
4674         if (ret) {
4675                 u64 to_free = 0;
4676                 unsigned dropped;
4677
4678                 spin_lock(&BTRFS_I(inode)->lock);
4679                 dropped = drop_outstanding_extent(inode);
4680                 /*
4681                  * If the inode's csum_bytes is the same as the original
4682                  * csum_bytes then we know we haven't raced with any free()ers
4683                  * so we can just reduce our inode's csum bytes and carry on.
4684                  * Otherwise we have to do the normal free thing to account for
4685                  * the case that the free side didn't free up its reserve
4686                  * because of this outstanding reservation.
4687                  */
4688                 if (BTRFS_I(inode)->csum_bytes == csum_bytes)
4689                         calc_csum_metadata_size(inode, num_bytes, 0);
4690                 else
4691                         to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4692                 spin_unlock(&BTRFS_I(inode)->lock);
4693                 if (dropped)
4694                         to_free += btrfs_calc_trans_metadata_size(root, dropped);
4695
4696                 if (to_free) {
4697                         btrfs_block_rsv_release(root, block_rsv, to_free);
4698                         trace_btrfs_space_reservation(root->fs_info,
4699                                                       "delalloc",
4700                                                       btrfs_ino(inode),
4701                                                       to_free, 0);
4702                 }
4703                 if (root->fs_info->quota_enabled) {
4704                         btrfs_qgroup_free(root, num_bytes +
4705                                                 nr_extents * root->leafsize);
4706                 }
4707                 if (delalloc_lock)
4708                         mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4709                 return ret;
4710         }
4711
4712         spin_lock(&BTRFS_I(inode)->lock);
4713         if (extra_reserve) {
4714                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4715                         &BTRFS_I(inode)->runtime_flags);
4716                 nr_extents--;
4717         }
4718         BTRFS_I(inode)->reserved_extents += nr_extents;
4719         spin_unlock(&BTRFS_I(inode)->lock);
4720
4721         if (delalloc_lock)
4722                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4723
4724         if (to_reserve)
4725                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4726                                               btrfs_ino(inode), to_reserve, 1);
4727         block_rsv_add_bytes(block_rsv, to_reserve, 1);
4728
4729         return 0;
4730 }
4731
4732 /**
4733  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4734  * @inode: the inode to release the reservation for
4735  * @num_bytes: the number of bytes we're releasing
4736  *
4737  * This will release the metadata reservation for an inode.  This can be called
4738  * once we complete IO for a given set of bytes to release their metadata
4739  * reservations.
4740  */
4741 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4742 {
4743         struct btrfs_root *root = BTRFS_I(inode)->root;
4744         u64 to_free = 0;
4745         unsigned dropped;
4746
4747         num_bytes = ALIGN(num_bytes, root->sectorsize);
4748         spin_lock(&BTRFS_I(inode)->lock);
4749         dropped = drop_outstanding_extent(inode);
4750
4751         if (num_bytes)
4752                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4753         spin_unlock(&BTRFS_I(inode)->lock);
4754         if (dropped > 0)
4755                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4756
4757         trace_btrfs_space_reservation(root->fs_info, "delalloc",
4758                                       btrfs_ino(inode), to_free, 0);
4759         if (root->fs_info->quota_enabled) {
4760                 btrfs_qgroup_free(root, num_bytes +
4761                                         dropped * root->leafsize);
4762         }
4763
4764         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4765                                 to_free);
4766 }
4767
4768 /**
4769  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4770  * @inode: inode we're writing to
4771  * @num_bytes: the number of bytes we want to allocate
4772  *
4773  * This will do the following things
4774  *
4775  * o reserve space in the data space info for num_bytes
4776  * o reserve space in the metadata space info based on number of outstanding
4777  *   extents and how much csums will be needed
4778  * o add to the inode's ->delalloc_bytes
4779  * o add it to the fs_info's delalloc inodes list.
4780  *
4781  * This will return 0 for success and -ENOSPC if there is no space left.
4782  */
4783 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4784 {
4785         int ret;
4786
4787         ret = btrfs_check_data_free_space(inode, num_bytes);
4788         if (ret)
4789                 return ret;
4790
4791         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4792         if (ret) {
4793                 btrfs_free_reserved_data_space(inode, num_bytes);
4794                 return ret;
4795         }
4796
4797         return 0;
4798 }
4799
4800 /**
4801  * btrfs_delalloc_release_space - release data and metadata space for delalloc
4802  * @inode: inode we're releasing space for
4803  * @num_bytes: the number of bytes we want to free up
4804  *
4805  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
4806  * called in the case that we don't need the metadata AND data reservations
4807  * anymore, for example when there is an error or we insert an inline extent.
4808  *
4809  * This function will release the metadata space that was not used and will
4810  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4811  * list if there are no delalloc bytes left.
4812  */
4813 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4814 {
4815         btrfs_delalloc_release_metadata(inode, num_bytes);
4816         btrfs_free_reserved_data_space(inode, num_bytes);
4817 }
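
/*
 * Typical pairing in a write path (sketch; do_the_write() is a placeholder
 * for whatever consumes the delalloc space):
 *
 *      ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *      if (ret)
 *              return ret;
 *      ret = do_the_write(inode, num_bytes);
 *      if (ret)
 *              btrfs_delalloc_release_space(inode, num_bytes);
 */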
4818
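/*
 * Adjust the accounting when [bytenr, bytenr + num_bytes) is allocated
 * (alloc == 1) or freed (alloc == 0): the superblock byte count, the
 * per-block-group used/reserved counters and the owning space_info.
 * Freed space is pinned rather than returned, so it only becomes
 * reusable once the transaction commits and the range is unpinned.
 */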
4819 static int update_block_group(struct btrfs_root *root,
4820                               u64 bytenr, u64 num_bytes, int alloc)
4821 {
4822         struct btrfs_block_group_cache *cache = NULL;
4823         struct btrfs_fs_info *info = root->fs_info;
4824         u64 total = num_bytes;
4825         u64 old_val;
4826         u64 byte_in_group;
4827         int factor;
4828
4829         /* block accounting for super block */
4830         spin_lock(&info->delalloc_lock);
4831         old_val = btrfs_super_bytes_used(info->super_copy);
4832         if (alloc)
4833                 old_val += num_bytes;
4834         else
4835                 old_val -= num_bytes;
4836         btrfs_set_super_bytes_used(info->super_copy, old_val);
4837         spin_unlock(&info->delalloc_lock);
4838
4839         while (total) {
4840                 cache = btrfs_lookup_block_group(info, bytenr);
4841                 if (!cache)
4842                         return -ENOENT;
4843                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4844                                     BTRFS_BLOCK_GROUP_RAID1 |
4845                                     BTRFS_BLOCK_GROUP_RAID10))
4846                         factor = 2;
4847                 else
4848                         factor = 1;
4849                 /*
4850                  * If this block group has free space cache written out, we
4851                  * need to make sure to load it if we are removing space.  This
4852                  * is because we need the unpinning stage to actually add the
4853                  * space back to the block group, otherwise we will leak space.
4854                  */
4855                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4856                         cache_block_group(cache, 1);
4857
4858                 byte_in_group = bytenr - cache->key.objectid;
4859                 WARN_ON(byte_in_group > cache->key.offset);
4860
4861                 spin_lock(&cache->space_info->lock);
4862                 spin_lock(&cache->lock);
4863
4864                 if (btrfs_test_opt(root, SPACE_CACHE) &&
4865                     cache->disk_cache_state < BTRFS_DC_CLEAR)
4866                         cache->disk_cache_state = BTRFS_DC_CLEAR;
4867
4868                 cache->dirty = 1;
4869                 old_val = btrfs_block_group_used(&cache->item);
4870                 num_bytes = min(total, cache->key.offset - byte_in_group);
4871                 if (alloc) {
4872                         old_val += num_bytes;
4873                         btrfs_set_block_group_used(&cache->item, old_val);
4874                         cache->reserved -= num_bytes;
4875                         cache->space_info->bytes_reserved -= num_bytes;
4876                         cache->space_info->bytes_used += num_bytes;
4877                         cache->space_info->disk_used += num_bytes * factor;
4878                         spin_unlock(&cache->lock);
4879                         spin_unlock(&cache->space_info->lock);
4880                 } else {
4881                         old_val -= num_bytes;
4882                         btrfs_set_block_group_used(&cache->item, old_val);
4883                         cache->pinned += num_bytes;
4884                         cache->space_info->bytes_pinned += num_bytes;
4885                         cache->space_info->bytes_used -= num_bytes;
4886                         cache->space_info->disk_used -= num_bytes * factor;
4887                         spin_unlock(&cache->lock);
4888                         spin_unlock(&cache->space_info->lock);
4889
4890                         set_extent_dirty(info->pinned_extents,
4891                                          bytenr, bytenr + num_bytes - 1,
4892                                          GFP_NOFS | __GFP_NOFAIL);
4893                 }
4894                 btrfs_put_block_group(cache);
4895                 total -= num_bytes;
4896                 bytenr += num_bytes;
4897         }
4898         return 0;
4899 }
4900
4901 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4902 {
4903         struct btrfs_block_group_cache *cache;
4904         u64 bytenr;
4905
4906         spin_lock(&root->fs_info->block_group_cache_lock);
4907         bytenr = root->fs_info->first_logical_byte;
4908         spin_unlock(&root->fs_info->block_group_cache_lock);
4909
4910         if (bytenr < (u64)-1)
4911                 return bytenr;
4912
4913         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4914         if (!cache)
4915                 return 0;
4916
4917         bytenr = cache->key.objectid;
4918         btrfs_put_block_group(cache);
4919
4920         return bytenr;
4921 }
4922
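/*
 * Account [bytenr, bytenr + num_bytes) as pinned in @cache: bump the pinned
 * counters (moving the bytes out of the reserved counters if @reserved) and
 * mark the range dirty in the pinned_extents tree so it is unpinned at
 * transaction commit.
 */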
4923 static int pin_down_extent(struct btrfs_root *root,
4924                            struct btrfs_block_group_cache *cache,
4925                            u64 bytenr, u64 num_bytes, int reserved)
4926 {
4927         spin_lock(&cache->space_info->lock);
4928         spin_lock(&cache->lock);
4929         cache->pinned += num_bytes;
4930         cache->space_info->bytes_pinned += num_bytes;
4931         if (reserved) {
4932                 cache->reserved -= num_bytes;
4933                 cache->space_info->bytes_reserved -= num_bytes;
4934         }
4935         spin_unlock(&cache->lock);
4936         spin_unlock(&cache->space_info->lock);
4937
4938         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4939                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4940         return 0;
4941 }
4942
4943 /*
4944  * this function must be called within a transaction
4945  */
4946 int btrfs_pin_extent(struct btrfs_root *root,
4947                      u64 bytenr, u64 num_bytes, int reserved)
4948 {
4949         struct btrfs_block_group_cache *cache;
4950
4951         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4952         BUG_ON(!cache); /* Logic error */
4953
4954         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4955
4956         btrfs_put_block_group(cache);
4957         return 0;
4958 }
4959
4960 /*
4961  * this function must be called within a transaction
4962  */
4963 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
4964                                     u64 bytenr, u64 num_bytes)
4965 {
4966         struct btrfs_block_group_cache *cache;
4967
4968         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4969         BUG_ON(!cache); /* Logic error */
4970
4971         /*
4972          * pull in the free space cache (if any) so that our pin
4973          * removes the free space from the cache.  We have load_only set
4974          * to one because the slow code to read in the free extents does check
4975          * the pinned extents.
4976          */
4977         cache_block_group(cache, 1);
4978
4979         pin_down_extent(root, cache, bytenr, num_bytes, 0);
4980
4981         /* remove us from the free space cache (if we're there at all) */
4982         btrfs_remove_free_space(cache, bytenr, num_bytes);
4983         btrfs_put_block_group(cache);
4984         return 0;
4985 }
4986
4987 /**
4988  * btrfs_update_reserved_bytes - update the block_group and space info counters
4989  * @cache:      The cache we are manipulating
4990  * @num_bytes:  The number of bytes in question
4991  * @reserve:    One of the reservation enums
4992  *
4993  * This is called by the allocator when it reserves space, or by somebody who is
4994  * freeing space that was never actually used on disk.  For example if you
4995  * reserve some space for a new leaf in transaction A and before transaction A
4996  * commits you free that leaf, you call this with reserve set to 0 in order to
4997  * clear the reservation.
4998  *
4999  * Metadata reservations should be made with RESERVE_ALLOC so we do the proper
5000  * ENOSPC accounting.  For data we handle the reservation through clearing the
5001  * delalloc bits in the io_tree.  We have to do this since we could end up
5002  * allocating less disk space for the amount of data we have reserved in the
5003  * case of compression.
5004  *
5005  * If this is a reservation and the block group has become read only we cannot
5006  * make the reservation and return -EAGAIN, otherwise this function always
5007  * succeeds.
5008  */
5009 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5010                                        u64 num_bytes, int reserve)
5011 {
5012         struct btrfs_space_info *space_info = cache->space_info;
5013         int ret = 0;
5014
5015         spin_lock(&space_info->lock);
5016         spin_lock(&cache->lock);
5017         if (reserve != RESERVE_FREE) {
5018                 if (cache->ro) {
5019                         ret = -EAGAIN;
5020                 } else {
5021                         cache->reserved += num_bytes;
5022                         space_info->bytes_reserved += num_bytes;
5023                         if (reserve == RESERVE_ALLOC) {
5024                                 trace_btrfs_space_reservation(cache->fs_info,
5025                                                 "space_info", space_info->flags,
5026                                                 num_bytes, 0);
5027                                 space_info->bytes_may_use -= num_bytes;
5028                         }
5029                 }
5030         } else {
5031                 if (cache->ro)
5032                         space_info->bytes_readonly += num_bytes;
5033                 cache->reserved -= num_bytes;
5034                 space_info->bytes_reserved -= num_bytes;
5035                 space_info->reservation_progress++;
5036         }
5037         spin_unlock(&cache->lock);
5038         spin_unlock(&space_info->lock);
5039         return ret;
5040 }
5041
5042 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5043                                 struct btrfs_root *root)
5044 {
5045         struct btrfs_fs_info *fs_info = root->fs_info;
5046         struct btrfs_caching_control *next;
5047         struct btrfs_caching_control *caching_ctl;
5048         struct btrfs_block_group_cache *cache;
5049
5050         down_write(&fs_info->extent_commit_sem);
5051
5052         list_for_each_entry_safe(caching_ctl, next,
5053                                  &fs_info->caching_block_groups, list) {
5054                 cache = caching_ctl->block_group;
5055                 if (block_group_cache_done(cache)) {
5056                         cache->last_byte_to_unpin = (u64)-1;
5057                         list_del_init(&caching_ctl->list);
5058                         put_caching_control(caching_ctl);
5059                 } else {
5060                         cache->last_byte_to_unpin = caching_ctl->progress;
5061                 }
5062         }
5063
5064         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5065                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5066         else
5067                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5068
5069         up_write(&fs_info->extent_commit_sem);
5070
5071         update_global_block_rsv(fs_info);
5072 }
5073
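/*
 * Give the pinned bytes in [start, end] back: re-add them to the free space
 * cache up to each block group's last_byte_to_unpin, drop the pinned
 * counters, and use the space to top up the global block reserve when it is
 * not already full.
 */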
5074 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5075 {
5076         struct btrfs_fs_info *fs_info = root->fs_info;
5077         struct btrfs_block_group_cache *cache = NULL;
5078         struct btrfs_space_info *space_info;
5079         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5080         u64 len;
5081         bool readonly;
5082
5083         while (start <= end) {
5084                 readonly = false;
5085                 if (!cache ||
5086                     start >= cache->key.objectid + cache->key.offset) {
5087                         if (cache)
5088                                 btrfs_put_block_group(cache);
5089                         cache = btrfs_lookup_block_group(fs_info, start);
5090                         BUG_ON(!cache); /* Logic error */
5091                 }
5092
5093                 len = cache->key.objectid + cache->key.offset - start;
5094                 len = min(len, end + 1 - start);
5095
5096                 if (start < cache->last_byte_to_unpin) {
5097                         len = min(len, cache->last_byte_to_unpin - start);
5098                         btrfs_add_free_space(cache, start, len);
5099                 }
5100
5101                 start += len;
5102                 space_info = cache->space_info;
5103
5104                 spin_lock(&space_info->lock);
5105                 spin_lock(&cache->lock);
5106                 cache->pinned -= len;
5107                 space_info->bytes_pinned -= len;
5108                 if (cache->ro) {
5109                         space_info->bytes_readonly += len;
5110                         readonly = true;
5111                 }
5112                 spin_unlock(&cache->lock);
5113                 if (!readonly && global_rsv->space_info == space_info) {
5114                         spin_lock(&global_rsv->lock);
5115                         if (!global_rsv->full) {
5116                                 len = min(len, global_rsv->size -
5117                                           global_rsv->reserved);
5118                                 global_rsv->reserved += len;
5119                                 space_info->bytes_may_use += len;
5120                                 if (global_rsv->reserved >= global_rsv->size)
5121                                         global_rsv->full = 1;
5122                         }
5123                         spin_unlock(&global_rsv->lock);
5124                 }
5125                 spin_unlock(&space_info->lock);
5126         }
5127
5128         if (cache)
5129                 btrfs_put_block_group(cache);
5130         return 0;
5131 }
5132
5133 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5134                                struct btrfs_root *root)
5135 {
5136         struct btrfs_fs_info *fs_info = root->fs_info;
5137         struct extent_io_tree *unpin;
5138         u64 start;
5139         u64 end;
5140         int ret;
5141
5142         if (trans->aborted)
5143                 return 0;
5144
5145         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5146                 unpin = &fs_info->freed_extents[1];
5147         else
5148                 unpin = &fs_info->freed_extents[0];
5149
5150         while (1) {
5151                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5152                                             EXTENT_DIRTY, NULL);
5153                 if (ret)
5154                         break;
5155
5156                 if (btrfs_test_opt(root, DISCARD))
5157                         ret = btrfs_discard_extent(root, start,
5158                                                    end + 1 - start, NULL);
5159
5160                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5161                 unpin_extent_range(root, start, end);
5162                 cond_resched();
5163         }
5164
5165         return 0;
5166 }
5167
5168 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5169                                 struct btrfs_root *root,
5170                                 u64 bytenr, u64 num_bytes, u64 parent,
5171                                 u64 root_objectid, u64 owner_objectid,
5172                                 u64 owner_offset, int refs_to_drop,
5173                                 struct btrfs_delayed_extent_op *extent_op)
5174 {
5175         struct btrfs_key key;
5176         struct btrfs_path *path;
5177         struct btrfs_fs_info *info = root->fs_info;
5178         struct btrfs_root *extent_root = info->extent_root;
5179         struct extent_buffer *leaf;
5180         struct btrfs_extent_item *ei;
5181         struct btrfs_extent_inline_ref *iref;
5182         int ret;
5183         int is_data;
5184         int extent_slot = 0;
5185         int found_extent = 0;
5186         int num_to_del = 1;
5187         u32 item_size;
5188         u64 refs;
5189
5190         path = btrfs_alloc_path();
5191         if (!path)
5192                 return -ENOMEM;
5193
5194         path->reada = 1;
5195         path->leave_spinning = 1;
5196
5197         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5198         BUG_ON(!is_data && refs_to_drop != 1);
5199
5200         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5201                                     bytenr, num_bytes, parent,
5202                                     root_objectid, owner_objectid,
5203                                     owner_offset);
5204         if (ret == 0) {
5205                 extent_slot = path->slots[0];
5206                 while (extent_slot >= 0) {
5207                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5208                                               extent_slot);
5209                         if (key.objectid != bytenr)
5210                                 break;
5211                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5212                             key.offset == num_bytes) {
5213                                 found_extent = 1;
5214                                 break;
5215                         }
5216                         if (path->slots[0] - extent_slot > 5)
5217                                 break;
5218                         extent_slot--;
5219                 }
5220 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5221                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5222                 if (found_extent && item_size < sizeof(*ei))
5223                         found_extent = 0;
5224 #endif
5225                 if (!found_extent) {
5226                         BUG_ON(iref);
5227                         ret = remove_extent_backref(trans, extent_root, path,
5228                                                     NULL, refs_to_drop,
5229                                                     is_data);
5230                         if (ret) {
5231                                 btrfs_abort_transaction(trans, extent_root, ret);
5232                                 goto out;
5233                         }
5234                         btrfs_release_path(path);
5235                         path->leave_spinning = 1;
5236
5237                         key.objectid = bytenr;
5238                         key.type = BTRFS_EXTENT_ITEM_KEY;
5239                         key.offset = num_bytes;
5240
5241                         ret = btrfs_search_slot(trans, extent_root,
5242                                                 &key, path, -1, 1);
5243                         if (ret) {
5244                                 printk(KERN_ERR "umm, got %d back from search"
5245                                        ", was looking for %llu\n", ret,
5246                                        (unsigned long long)bytenr);
5247                                 if (ret > 0)
5248                                         btrfs_print_leaf(extent_root,
5249                                                          path->nodes[0]);
5250                         }
5251                         if (ret < 0) {
5252                                 btrfs_abort_transaction(trans, extent_root, ret);
5253                                 goto out;
5254                         }
5255                         extent_slot = path->slots[0];
5256                 }
5257         } else if (ret == -ENOENT) {
5258                 btrfs_print_leaf(extent_root, path->nodes[0]);
5259                 WARN_ON(1);
5260                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
5261                        "parent %llu root %llu  owner %llu offset %llu\n",
5262                        (unsigned long long)bytenr,
5263                        (unsigned long long)parent,
5264                        (unsigned long long)root_objectid,
5265                        (unsigned long long)owner_objectid,
5266                        (unsigned long long)owner_offset);
5267         } else {
5268                 btrfs_abort_transaction(trans, extent_root, ret);
5269                 goto out;
5270         }
5271
5272         leaf = path->nodes[0];
5273         item_size = btrfs_item_size_nr(leaf, extent_slot);
5274 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5275         if (item_size < sizeof(*ei)) {
5276                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5277                 ret = convert_extent_item_v0(trans, extent_root, path,
5278                                              owner_objectid, 0);
5279                 if (ret < 0) {
5280                         btrfs_abort_transaction(trans, extent_root, ret);
5281                         goto out;
5282                 }
5283
5284                 btrfs_release_path(path);
5285                 path->leave_spinning = 1;
5286
5287                 key.objectid = bytenr;
5288                 key.type = BTRFS_EXTENT_ITEM_KEY;
5289                 key.offset = num_bytes;
5290
5291                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5292                                         -1, 1);
5293                 if (ret) {
5294                         printk(KERN_ERR "umm, got %d back from search"
5295                                ", was looking for %llu\n", ret,
5296                                (unsigned long long)bytenr);
5297                         btrfs_print_leaf(extent_root, path->nodes[0]);
5298                 }
5299                 if (ret < 0) {
5300                         btrfs_abort_transaction(trans, extent_root, ret);
5301                         goto out;
5302                 }
5303
5304                 extent_slot = path->slots[0];
5305                 leaf = path->nodes[0];
5306                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5307         }
5308 #endif
5309         BUG_ON(item_size < sizeof(*ei));
5310         ei = btrfs_item_ptr(leaf, extent_slot,
5311                             struct btrfs_extent_item);
5312         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5313                 struct btrfs_tree_block_info *bi;
5314                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5315                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5316                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5317         }
5318
5319         refs = btrfs_extent_refs(leaf, ei);
5320         BUG_ON(refs < refs_to_drop);
5321         refs -= refs_to_drop;
5322
5323         if (refs > 0) {
5324                 if (extent_op)
5325                         __run_delayed_extent_op(extent_op, leaf, ei);
5326                 /*
5327                  * In the case of inline back ref, reference count will
5328                  * be updated by remove_extent_backref
5329                  */
5330                 if (iref) {
5331                         BUG_ON(!found_extent);
5332                 } else {
5333                         btrfs_set_extent_refs(leaf, ei, refs);
5334                         btrfs_mark_buffer_dirty(leaf);
5335                 }
5336                 if (found_extent) {
5337                         ret = remove_extent_backref(trans, extent_root, path,
5338                                                     iref, refs_to_drop,
5339                                                     is_data);
5340                         if (ret) {
5341                                 btrfs_abort_transaction(trans, extent_root, ret);
5342                                 goto out;
5343                         }
5344                 }
5345         } else {
5346                 if (found_extent) {
5347                         BUG_ON(is_data && refs_to_drop !=
5348                                extent_data_ref_count(root, path, iref));
5349                         if (iref) {
5350                                 BUG_ON(path->slots[0] != extent_slot);
5351                         } else {
5352                                 BUG_ON(path->slots[0] != extent_slot + 1);
5353                                 path->slots[0] = extent_slot;
5354                                 num_to_del = 2;
5355                         }
5356                 }
5357
5358                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5359                                       num_to_del);
5360                 if (ret) {
5361                         btrfs_abort_transaction(trans, extent_root, ret);
5362                         goto out;
5363                 }
5364                 btrfs_release_path(path);
5365
5366                 if (is_data) {
5367                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5368                         if (ret) {
5369                                 btrfs_abort_transaction(trans, extent_root, ret);
5370                                 goto out;
5371                         }
5372                 }
5373
5374                 ret = update_block_group(root, bytenr, num_bytes, 0);
5375                 if (ret) {
5376                         btrfs_abort_transaction(trans, extent_root, ret);
5377                         goto out;
5378                 }
5379         }
5380 out:
5381         btrfs_free_path(path);
5382         return ret;
5383 }
5384
5385 /*
5386  * when we free a block, it is possible (and likely) that we free the last
5387  * delayed ref for that extent as well.  This searches the delayed ref tree for
5388  * a given extent, and if there are no other delayed refs to be processed, it
5389  * removes it from the tree.
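 *
 * Returns 1 if the head was removed and must_insert_reserved was set, in
 * which case the caller is expected to release the extent's reservation
 * itself; 0 otherwise.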
5390  */
5391 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5392                                       struct btrfs_root *root, u64 bytenr)
5393 {
5394         struct btrfs_delayed_ref_head *head;
5395         struct btrfs_delayed_ref_root *delayed_refs;
5396         struct btrfs_delayed_ref_node *ref;
5397         struct rb_node *node;
5398         int ret = 0;
5399
5400         delayed_refs = &trans->transaction->delayed_refs;
5401         spin_lock(&delayed_refs->lock);
5402         head = btrfs_find_delayed_ref_head(trans, bytenr);
5403         if (!head)
5404                 goto out;
5405
5406         node = rb_prev(&head->node.rb_node);
5407         if (!node)
5408                 goto out;
5409
5410         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5411
5412         /* there are still entries for this ref, we can't drop it */
5413         if (ref->bytenr == bytenr)
5414                 goto out;
5415
5416         if (head->extent_op) {
5417                 if (!head->must_insert_reserved)
5418                         goto out;
5419                 btrfs_free_delayed_extent_op(head->extent_op);
5420                 head->extent_op = NULL;
5421         }
5422
5423         /*
5424          * waiting for the lock here would deadlock.  If someone else has it
5425          * locked, they are already in the process of dropping it anyway.
5426          */
5427         if (!mutex_trylock(&head->mutex))
5428                 goto out;
5429
5430         /*
5431          * at this point we have a head with no other entries.  Go
5432          * ahead and process it.
5433          */
5434         head->node.in_tree = 0;
5435         rb_erase(&head->node.rb_node, &delayed_refs->root);
5436
5437         delayed_refs->num_entries--;
5438
5439         /*
5440          * we don't take a ref on the node because we're removing it from the
5441          * tree, so we just steal the ref the tree was holding.
5442          */
5443         delayed_refs->num_heads--;
5444         if (list_empty(&head->cluster))
5445                 delayed_refs->num_heads_ready--;
5446
5447         list_del_init(&head->cluster);
5448         spin_unlock(&delayed_refs->lock);
5449
5450         BUG_ON(head->extent_op);
5451         if (head->must_insert_reserved)
5452                 ret = 1;
5453
5454         mutex_unlock(&head->mutex);
5455         btrfs_put_delayed_ref(&head->node);
5456         return ret;
5457 out:
5458         spin_unlock(&delayed_refs->lock);
5459         return 0;
5460 }
5461
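/*
 * queue a delayed ref dropping our reference on @buf.  If this was the
 * last reference and the block was allocated in the running transaction
 * (and never written out), its space can be handed straight back to the
 * free space cache; otherwise the block is pinned, either here or when
 * the delayed ref is run, until the transaction commits.
 */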
5462 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5463                            struct btrfs_root *root,
5464                            struct extent_buffer *buf,
5465                            u64 parent, int last_ref)
5466 {
5467         struct btrfs_block_group_cache *cache = NULL;
5468         int ret;
5469
5470         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5471                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5472                                         buf->start, buf->len,
5473                                         parent, root->root_key.objectid,
5474                                         btrfs_header_level(buf),
5475                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
5476                 BUG_ON(ret); /* -ENOMEM */
5477         }
5478
5479         if (!last_ref)
5480                 return;
5481
5482         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5483
5484         if (btrfs_header_generation(buf) == trans->transid) {
5485                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5486                         ret = check_ref_cleanup(trans, root, buf->start);
5487                         if (!ret)
5488                                 goto out;
5489                 }
5490
5491                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5492                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5493                         goto out;
5494                 }
5495
5496                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5497
5498                 btrfs_add_free_space(cache, buf->start, buf->len);
5499                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5500         }
5501 out:
5502         /*
5503          * Deleting the buffer, clear the corrupt flag since it doesn't matter
5504          * anymore.
5505          */
5506         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5507         btrfs_put_block_group(cache);
5508 }
5509
5510 /* Can return -ENOMEM */
5511 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5512                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5513                       u64 owner, u64 offset, int for_cow)
5514 {
5515         int ret;
5516         struct btrfs_fs_info *fs_info = root->fs_info;
5517
5518         /*
5519          * tree log blocks never actually go into the extent allocation
5520          * tree, just update pinning info and exit early.
5521          */
5522         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5523                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5524                 /* unlocks the pinned mutex */
5525                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
5526                 ret = 0;
5527         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5528                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5529                                         num_bytes,
5530                                         parent, root_objectid, (int)owner,
5531                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5532         } else {
5533                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5534                                                 num_bytes,
5535                                                 parent, root_objectid, owner,
5536                                                 offset, BTRFS_DROP_DELAYED_REF,
5537                                                 NULL, for_cow);
5538         }
5539         return ret;
5540 }
5541
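/*
 * round val up to the next multiple of the (power-of-two) stripe size.
 * For example, with a stripesize of 4096 (an illustrative value only):
 * stripe_align(root, 4097) == 8192, stripe_align(root, 4096) == 4096.
 */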
5542 static u64 stripe_align(struct btrfs_root *root, u64 val)
5543 {
5544         u64 mask = ((u64)root->stripesize - 1);
5545         u64 ret = (val + mask) & ~mask;
5546         return ret;
5547 }
5548
5549 /*
5550  * when we wait for progress in the block group caching, it's because
5551  * our allocation attempt failed at least once.  So, we must sleep
5552  * and let some progress happen before we try again.
5553  *
5554  * This function will sleep at least once waiting for new free space to
5555  * show up, and then it will check the block group free space numbers
5556  * for our min num_bytes.  Another option is to have it go ahead
5557  * and look in the rbtree for a free extent of a given size, but this
5558  * is a good start.
5559  */
5560 static noinline int
5561 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5562                                 u64 num_bytes)
5563 {
5564         struct btrfs_caching_control *caching_ctl;
5565
5566         caching_ctl = get_caching_control(cache);
5567         if (!caching_ctl)
5568                 return 0;
5569
5570         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5571                    (cache->free_space_ctl->free_space >= num_bytes));
5572
5573         put_caching_control(caching_ctl);
5574         return 0;
5575 }
5576
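/*
 * like wait_block_group_cache_progress() above, but waits until the
 * block group's free space has been completely cached.
 */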
5577 static noinline int
5578 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5579 {
5580         struct btrfs_caching_control *caching_ctl;
5581
5582         caching_ctl = get_caching_control(cache);
5583         if (!caching_ctl)
5584                 return 0;
5585
5586         wait_event(caching_ctl->wait, block_group_cache_done(cache));
5587
5588         put_caching_control(caching_ctl);
5589         return 0;
5590 }
5591
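/*
 * map allocation profile flags to the corresponding index into
 * space_info->block_groups[]; find_free_extent uses this to walk the
 * per-profile block group lists.
 */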
5592 int __get_raid_index(u64 flags)
5593 {
5594         if (flags & BTRFS_BLOCK_GROUP_RAID10)
5595                 return BTRFS_RAID_RAID10;
5596         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
5597                 return BTRFS_RAID_RAID1;
5598         else if (flags & BTRFS_BLOCK_GROUP_DUP)
5599                 return BTRFS_RAID_DUP;
5600         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
5601                 return BTRFS_RAID_RAID0;
5602         else
5603                 return BTRFS_RAID_SINGLE;
5604 }
5605
5606 static int get_block_group_index(struct btrfs_block_group_cache *cache)
5607 {
5608         return __get_raid_index(cache->flags);
5609 }
5610
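/*
 * the stages find_free_extent goes through when it can't immediately
 * satisfy an allocation; see the comment near the bottom of
 * find_free_extent() for what each stage does.
 */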
5611 enum btrfs_loop_type {
5612         LOOP_CACHING_NOWAIT = 0,
5613         LOOP_CACHING_WAIT = 1,
5614         LOOP_ALLOC_CHUNK = 2,
5615         LOOP_NO_EMPTY_SIZE = 3,
5616 };
5617
5618 /*
5619  * walks the btree of allocated extents and finds a hole of a given size.
5620  * The key ins is changed to record the hole:
5621  * ins->objectid == block start
5622  * ins->flags == BTRFS_EXTENT_ITEM_KEY
5623  * ins->offset == number of blocks
5624  * Any available blocks before search_start are skipped.
5625  */
5626 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5627                                      struct btrfs_root *orig_root,
5628                                      u64 num_bytes, u64 empty_size,
5629                                      u64 hint_byte, struct btrfs_key *ins,
5630                                      u64 data)
5631 {
5632         int ret = 0;
5633         struct btrfs_root *root = orig_root->fs_info->extent_root;
5634         struct btrfs_free_cluster *last_ptr = NULL;
5635         struct btrfs_block_group_cache *block_group = NULL;
5636         struct btrfs_block_group_cache *used_block_group;
5637         u64 search_start = 0;
5638         int empty_cluster = 2 * 1024 * 1024;
5639         struct btrfs_space_info *space_info;
5640         int loop = 0;
5641         int index = __get_raid_index(data);
5642         int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5643                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5644         bool found_uncached_bg = false;
5645         bool failed_cluster_refill = false;
5646         bool failed_alloc = false;
5647         bool use_cluster = true;
5648         bool have_caching_bg = false;
5649
5650         WARN_ON(num_bytes < root->sectorsize);
5651         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5652         ins->objectid = 0;
5653         ins->offset = 0;
5654
5655         trace_find_free_extent(orig_root, num_bytes, empty_size, data);
5656
5657         space_info = __find_space_info(root->fs_info, data);
5658         if (!space_info) {
5659                 printk(KERN_ERR "No space info for %llu\n", (unsigned long long)data);
5660                 return -ENOSPC;
5661         }
5662
5663         /*
5664          * If the space info is for both data and metadata it means we have a
5665          * small filesystem and we can't use the clustering stuff.
5666          */
5667         if (btrfs_mixed_space_info(space_info))
5668                 use_cluster = false;
5669
5670         if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5671                 last_ptr = &root->fs_info->meta_alloc_cluster;
5672                 if (!btrfs_test_opt(root, SSD))
5673                         empty_cluster = 64 * 1024;
5674         }
5675
5676         if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5677             btrfs_test_opt(root, SSD)) {
5678                 last_ptr = &root->fs_info->data_alloc_cluster;
5679         }
5680
5681         if (last_ptr) {
5682                 spin_lock(&last_ptr->lock);
5683                 if (last_ptr->block_group)
5684                         hint_byte = last_ptr->window_start;
5685                 spin_unlock(&last_ptr->lock);
5686         }
5687
5688         search_start = max(search_start, first_logical_byte(root, 0));
5689         search_start = max(search_start, hint_byte);
5690
5691         if (!last_ptr)
5692                 empty_cluster = 0;
5693
5694         if (search_start == hint_byte) {
5695                 block_group = btrfs_lookup_block_group(root->fs_info,
5696                                                        search_start);
5697                 used_block_group = block_group;
5698                 /*
5699                  * we don't want to use the block group if it doesn't match our
5700                  * allocation bits, or if it's not cached.
5701                  *
5702                  * However if we are re-searching with an ideal block group
5703                  * picked out then we don't care that the block group is cached.
5704                  */
5705                 if (block_group && block_group_bits(block_group, data) &&
5706                     block_group->cached != BTRFS_CACHE_NO) {
5707                         down_read(&space_info->groups_sem);
5708                         if (list_empty(&block_group->list) ||
5709                             block_group->ro) {
5710                                 /*
5711                                  * someone is removing this block group,
5712                                  * we can't jump into the have_block_group
5713                                  * target because our list pointers are not
5714                                  * valid
5715                                  */
5716                                 btrfs_put_block_group(block_group);
5717                                 up_read(&space_info->groups_sem);
5718                         } else {
5719                                 index = get_block_group_index(block_group);
5720                                 goto have_block_group;
5721                         }
5722                 } else if (block_group) {
5723                         btrfs_put_block_group(block_group);
5724                 }
5725         }
5726 search:
5727         have_caching_bg = false;
5728         down_read(&space_info->groups_sem);
5729         list_for_each_entry(block_group, &space_info->block_groups[index],
5730                             list) {
5731                 u64 offset;
5732                 int cached;
5733
5734                 used_block_group = block_group;
5735                 btrfs_get_block_group(block_group);
5736                 search_start = block_group->key.objectid;
5737
5738                 /*
5739                  * this can happen if we end up cycling through all the
5740                  * raid types, but we want to make sure we only allocate
5741                  * for the proper type.
5742                  */
5743                 if (!block_group_bits(block_group, data)) {
5744                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
5745                                     BTRFS_BLOCK_GROUP_RAID1 |
5746                                     BTRFS_BLOCK_GROUP_RAID10;
5747
5748                         /*
5749                          * if they asked for extra copies and this block group
5750                          * doesn't provide them, bail.  This does allow us to
5751                          * fill raid0 from raid1.
5752                          */
5753                         if ((data & extra) && !(block_group->flags & extra))
5754                                 goto loop;
5755                 }
5756
5757 have_block_group:
5758                 cached = block_group_cache_done(block_group);
5759                 if (unlikely(!cached)) {
5760                         found_uncached_bg = true;
5761                         ret = cache_block_group(block_group, 0);
5762                         BUG_ON(ret < 0);
5763                         ret = 0;
5764                 }
5765
5766                 if (unlikely(block_group->ro))
5767                         goto loop;
5768
5769                 /*
5770                  * Ok we want to try and use the cluster allocator, so
5771                  * let's look there
5772                  */
5773                 if (last_ptr) {
5774                         /*
5775                          * the refill lock keeps out other
5776                          * people trying to start a new cluster
5777                          */
5778                         spin_lock(&last_ptr->refill_lock);
5779                         used_block_group = last_ptr->block_group;
5780                         if (used_block_group != block_group &&
5781                             (!used_block_group ||
5782                              used_block_group->ro ||
5783                              !block_group_bits(used_block_group, data))) {
5784                                 used_block_group = block_group;
5785                                 goto refill_cluster;
5786                         }
5787
5788                         if (used_block_group != block_group)
5789                                 btrfs_get_block_group(used_block_group);
5790
5791                         offset = btrfs_alloc_from_cluster(used_block_group,
5792                           last_ptr, num_bytes, used_block_group->key.objectid);
5793                         if (offset) {
5794                                 /* we have a block, we're done */
5795                                 spin_unlock(&last_ptr->refill_lock);
5796                                 trace_btrfs_reserve_extent_cluster(root,
5797                                         block_group, search_start, num_bytes);
5798                                 goto checks;
5799                         }
5800
5801                         WARN_ON(last_ptr->block_group != used_block_group);
5802                         if (used_block_group != block_group) {
5803                                 btrfs_put_block_group(used_block_group);
5804                                 used_block_group = block_group;
5805                         }
5806 refill_cluster:
5807                         BUG_ON(used_block_group != block_group);
5808                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
5809                          * set up a new cluster, so let's just skip it
5810                          * and let the allocator find whatever block
5811                          * it can find.  If we reach this point, we
5812                          * will have tried the cluster allocator
5813                          * plenty of times and not have found
5814                          * anything, so we are likely way too
5815                          * fragmented for the clustering stuff to find
5816                          * anything.
5817                          *
5818                          * However, if the cluster is taken from the
5819                          * current block group, release the cluster
5820                          * first, so that we stand a better chance of
5821                          * succeeding in the unclustered
5822                          * allocation.  */
5823                         if (loop >= LOOP_NO_EMPTY_SIZE &&
5824                             last_ptr->block_group != block_group) {
5825                                 spin_unlock(&last_ptr->refill_lock);
5826                                 goto unclustered_alloc;
5827                         }
5828
5829                         /*
5830                          * this cluster didn't work out, free it and
5831                          * start over
5832                          */
5833                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5834
5835                         if (loop >= LOOP_NO_EMPTY_SIZE) {
5836                                 spin_unlock(&last_ptr->refill_lock);
5837                                 goto unclustered_alloc;
5838                         }
5839
5840                         /* allocate a cluster in this block group */
5841                         ret = btrfs_find_space_cluster(trans, root,
5842                                                block_group, last_ptr,
5843                                                search_start, num_bytes,
5844                                                empty_cluster + empty_size);
5845                         if (ret == 0) {
5846                                 /*
5847                                  * now pull our allocation out of this
5848                                  * cluster
5849                                  */
5850                                 offset = btrfs_alloc_from_cluster(block_group,
5851                                                   last_ptr, num_bytes,
5852                                                   search_start);
5853                                 if (offset) {
5854                                         /* we found one, proceed */
5855                                         spin_unlock(&last_ptr->refill_lock);
5856                                         trace_btrfs_reserve_extent_cluster(root,
5857                                                 block_group, search_start,
5858                                                 num_bytes);
5859                                         goto checks;
5860                                 }
5861                         } else if (!cached && loop > LOOP_CACHING_NOWAIT &&
5862                                    !failed_cluster_refill) {
5863                                 spin_unlock(&last_ptr->refill_lock);
5864
5865                                 failed_cluster_refill = true;
5866                                 wait_block_group_cache_progress(block_group,
5867                                        num_bytes + empty_cluster + empty_size);
5868                                 goto have_block_group;
5869                         }
5870
5871                         /*
5872                          * at this point we either didn't find a cluster
5873                          * or we weren't able to allocate a block from our
5874                          * cluster.  Free the cluster we've been trying
5875                          * to use, and go to the next block group
5876                          */
5877                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5878                         spin_unlock(&last_ptr->refill_lock);
5879                         goto loop;
5880                 }
5881
5882 unclustered_alloc:
5883                 spin_lock(&block_group->free_space_ctl->tree_lock);
5884                 if (cached &&
5885                     block_group->free_space_ctl->free_space <
5886                     num_bytes + empty_cluster + empty_size) {
5887                         spin_unlock(&block_group->free_space_ctl->tree_lock);
5888                         goto loop;
5889                 }
5890                 spin_unlock(&block_group->free_space_ctl->tree_lock);
5891
5892                 offset = btrfs_find_space_for_alloc(block_group, search_start,
5893                                                     num_bytes, empty_size);
5894                 /*
5895                  * If we didn't find a chunk, and we haven't failed on this
5896                  * block group before, and this block group is in the middle of
5897                  * caching and we are ok with waiting, then go ahead and wait
5898                  * for progress to be made, and set failed_alloc to true.
5899                  *
5900                  * If failed_alloc is true then we've already waited on this
5901                  * block group once and should move on to the next block group.
5902                  */
5903                 if (!offset && !failed_alloc && !cached &&
5904                     loop > LOOP_CACHING_NOWAIT) {
5905                         wait_block_group_cache_progress(block_group,
5906                                                 num_bytes + empty_size);
5907                         failed_alloc = true;
5908                         goto have_block_group;
5909                 } else if (!offset) {
5910                         if (!cached)
5911                                 have_caching_bg = true;
5912                         goto loop;
5913                 }
5914 checks:
5915                 search_start = stripe_align(root, offset);
5916
5917                 /* move on to the next group */
5918                 if (search_start + num_bytes >
5919                     used_block_group->key.objectid + used_block_group->key.offset) {
5920                         btrfs_add_free_space(used_block_group, offset, num_bytes);
5921                         goto loop;
5922                 }
5923
5924                 if (offset < search_start)
5925                         btrfs_add_free_space(used_block_group, offset,
5926                                              search_start - offset);
5927                 BUG_ON(offset > search_start);
5928
5929                 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
5930                                                   alloc_type);
5931                 if (ret == -EAGAIN) {
5932                         btrfs_add_free_space(used_block_group, offset, num_bytes);
5933                         goto loop;
5934                 }
5935
5936                 /* we are all good, let's return */
5937                 ins->objectid = search_start;
5938                 ins->offset = num_bytes;
5939
5940                 trace_btrfs_reserve_extent(orig_root, block_group,
5941                                            search_start, num_bytes);
5942                 if (used_block_group != block_group)
5943                         btrfs_put_block_group(used_block_group);
5944                 btrfs_put_block_group(block_group);
5945                 break;
5946 loop:
5947                 failed_cluster_refill = false;
5948                 failed_alloc = false;
5949                 BUG_ON(index != get_block_group_index(block_group));
5950                 if (used_block_group != block_group)
5951                         btrfs_put_block_group(used_block_group);
5952                 btrfs_put_block_group(block_group);
5953         }
5954         up_read(&space_info->groups_sem);
5955
5956         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
5957                 goto search;
5958
5959         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5960                 goto search;
5961
5962         /*
5963          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5964          *                      caching kthreads as we move along
5965          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5966          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5967          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5968          *                      again
5969          */
5970         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
5971                 index = 0;
5972                 loop++;
5973                 if (loop == LOOP_ALLOC_CHUNK) {
5974                         ret = do_chunk_alloc(trans, root, data,
5975                                              CHUNK_ALLOC_FORCE);
5976                         /*
5977                          * Do not bail out on ENOSPC since we
5978                          * can do more things.
5979                          */
5980                         if (ret < 0 && ret != -ENOSPC) {
5981                                 btrfs_abort_transaction(trans,
5982                                                         root, ret);
5983                                 goto out;
5984                         }
5985                 }
5986
5987                 if (loop == LOOP_NO_EMPTY_SIZE) {
5988                         empty_size = 0;
5989                         empty_cluster = 0;
5990                 }
5991
5992                 goto search;
5993         } else if (!ins->objectid) {
5994                 ret = -ENOSPC;
5995         } else {
5996                 ret = 0;
5997         }
5998 out:
5999
6000         return ret;
6001 }
6002
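/*
 * dump the usage counters of a space_info, and optionally of each of
 * its block groups, to the kernel log.  Used for ENOSPC debugging.
 */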
6003 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6004                             int dump_block_groups)
6005 {
6006         struct btrfs_block_group_cache *cache;
6007         int index = 0;
6008
6009         spin_lock(&info->lock);
6010         printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
6011                (unsigned long long)info->flags,
6012                (unsigned long long)(info->total_bytes - info->bytes_used -
6013                                     info->bytes_pinned - info->bytes_reserved -
6014                                     info->bytes_readonly),
6015                (info->full) ? "" : "not ");
6016         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
6017                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6018                (unsigned long long)info->total_bytes,
6019                (unsigned long long)info->bytes_used,
6020                (unsigned long long)info->bytes_pinned,
6021                (unsigned long long)info->bytes_reserved,
6022                (unsigned long long)info->bytes_may_use,
6023                (unsigned long long)info->bytes_readonly);
6024         spin_unlock(&info->lock);
6025
6026         if (!dump_block_groups)
6027                 return;
6028
6029         down_read(&info->groups_sem);
6030 again:
6031         list_for_each_entry(cache, &info->block_groups[index], list) {
6032                 spin_lock(&cache->lock);
6033                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
6034                        (unsigned long long)cache->key.objectid,
6035                        (unsigned long long)cache->key.offset,
6036                        (unsigned long long)btrfs_block_group_used(&cache->item),
6037                        (unsigned long long)cache->pinned,
6038                        (unsigned long long)cache->reserved,
6039                        cache->ro ? "[readonly]" : "");
6040                 btrfs_dump_free_space(cache, bytes);
6041                 spin_unlock(&cache->lock);
6042         }
6043         if (++index < BTRFS_NR_RAID_TYPES)
6044                 goto again;
6045         up_read(&info->groups_sem);
6046 }
6047
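/*
 * reserve an extent of at least min_alloc_size bytes.  On ENOSPC the
 * request is halved (rounded down to a sectorsize boundary) and retried
 * until it drops to min_alloc_size; e.g. an 8M request with a 64K
 * min_alloc_size would retry at 4M, 2M, ... down to 64K before giving
 * up (sizes here are illustrative only).
 */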
6048 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
6049                          struct btrfs_root *root,
6050                          u64 num_bytes, u64 min_alloc_size,
6051                          u64 empty_size, u64 hint_byte,
6052                          struct btrfs_key *ins, u64 data)
6053 {
6054         bool final_tried = false;
6055         int ret;
6056
6057         data = btrfs_get_alloc_profile(root, data);
6058 again:
6059         WARN_ON(num_bytes < root->sectorsize);
6060         ret = find_free_extent(trans, root, num_bytes, empty_size,
6061                                hint_byte, ins, data);
6062
6063         if (ret == -ENOSPC) {
6064                 if (!final_tried) {
6065                         num_bytes = num_bytes >> 1;
6066                         num_bytes = num_bytes & ~(root->sectorsize - 1);
6067                         num_bytes = max(num_bytes, min_alloc_size);
6068                         if (num_bytes == min_alloc_size)
6069                                 final_tried = true;
6070                         goto again;
6071                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6072                         struct btrfs_space_info *sinfo;
6073
6074                         sinfo = __find_space_info(root->fs_info, data);
6075                         printk(KERN_ERR "btrfs allocation failed flags %llu, "
6076                                "wanted %llu\n", (unsigned long long)data,
6077                                (unsigned long long)num_bytes);
6078                         if (sinfo)
6079                                 dump_space_info(sinfo, num_bytes, 1);
6080                 }
6081         }
6082
6083         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6084
6085         return ret;
6086 }
6087
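/*
 * give back an extent that was reserved but not used: discard it if the
 * DISCARD mount option is set, then either pin it until the transaction
 * commits (pin != 0) or return it directly to the free space cache.
 */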
6088 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6089                                         u64 start, u64 len, int pin)
6090 {
6091         struct btrfs_block_group_cache *cache;
6092         int ret = 0;
6093
6094         cache = btrfs_lookup_block_group(root->fs_info, start);
6095         if (!cache) {
6096                 printk(KERN_ERR "Unable to find block group for %llu\n",
6097                        (unsigned long long)start);
6098                 return -ENOSPC;
6099         }
6100
6101         if (btrfs_test_opt(root, DISCARD))
6102                 ret = btrfs_discard_extent(root, start, len, NULL);
6103
6104         if (pin)
6105                 pin_down_extent(root, cache, start, len, 1);
6106         else {
6107                 btrfs_add_free_space(cache, start, len);
6108                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6109         }
6110         btrfs_put_block_group(cache);
6111
6112         trace_btrfs_reserved_extent_free(root, start, len);
6113
6114         return ret;
6115 }
6116
6117 int btrfs_free_reserved_extent(struct btrfs_root *root,
6118                                         u64 start, u64 len)
6119 {
6120         return __btrfs_free_reserved_extent(root, start, len, 0);
6121 }
6122
6123 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6124                                        u64 start, u64 len)
6125 {
6126         return __btrfs_free_reserved_extent(root, start, len, 1);
6127 }
6128
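/*
 * insert the extent item for a newly allocated data extent together
 * with a single inline backref: a btrfs_extent_item followed by a
 * btrfs_extent_inline_ref of type SHARED_DATA_REF (when parent is set)
 * or EXTENT_DATA_REF, with the matching ref payload embedded after the
 * ref type.
 */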
6129 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6130                                       struct btrfs_root *root,
6131                                       u64 parent, u64 root_objectid,
6132                                       u64 flags, u64 owner, u64 offset,
6133                                       struct btrfs_key *ins, int ref_mod)
6134 {
6135         int ret;
6136         struct btrfs_fs_info *fs_info = root->fs_info;
6137         struct btrfs_extent_item *extent_item;
6138         struct btrfs_extent_inline_ref *iref;
6139         struct btrfs_path *path;
6140         struct extent_buffer *leaf;
6141         int type;
6142         u32 size;
6143
6144         if (parent > 0)
6145                 type = BTRFS_SHARED_DATA_REF_KEY;
6146         else
6147                 type = BTRFS_EXTENT_DATA_REF_KEY;
6148
6149         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6150
6151         path = btrfs_alloc_path();
6152         if (!path)
6153                 return -ENOMEM;
6154
6155         path->leave_spinning = 1;
6156         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6157                                       ins, size);
6158         if (ret) {
6159                 btrfs_free_path(path);
6160                 return ret;
6161         }
6162
6163         leaf = path->nodes[0];
6164         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6165                                      struct btrfs_extent_item);
6166         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6167         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6168         btrfs_set_extent_flags(leaf, extent_item,
6169                                flags | BTRFS_EXTENT_FLAG_DATA);
6170
6171         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6172         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6173         if (parent > 0) {
6174                 struct btrfs_shared_data_ref *ref;
6175                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6176                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6177                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6178         } else {
6179                 struct btrfs_extent_data_ref *ref;
6180                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6181                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6182                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6183                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6184                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6185         }
6186
6187         btrfs_mark_buffer_dirty(path->nodes[0]);
6188         btrfs_free_path(path);
6189
6190         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6191         if (ret) { /* -ENOENT, logic error */
6192                 printk(KERN_ERR "btrfs update block group failed for %llu "
6193                        "%llu\n", (unsigned long long)ins->objectid,
6194                        (unsigned long long)ins->offset);
6195                 BUG();
6196         }
6197         return ret;
6198 }
6199
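/*
 * like alloc_reserved_file_extent() above, but for tree blocks: the
 * extent item is followed by a btrfs_tree_block_info (key and level)
 * and then a single inline backref, SHARED_BLOCK_REF for full-backref
 * blocks or TREE_BLOCK_REF otherwise.
 */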
6200 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6201                                      struct btrfs_root *root,
6202                                      u64 parent, u64 root_objectid,
6203                                      u64 flags, struct btrfs_disk_key *key,
6204                                      int level, struct btrfs_key *ins)
6205 {
6206         int ret;
6207         struct btrfs_fs_info *fs_info = root->fs_info;
6208         struct btrfs_extent_item *extent_item;
6209         struct btrfs_tree_block_info *block_info;
6210         struct btrfs_extent_inline_ref *iref;
6211         struct btrfs_path *path;
6212         struct extent_buffer *leaf;
6213         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
6214
6215         path = btrfs_alloc_path();
6216         if (!path)
6217                 return -ENOMEM;
6218
6219         path->leave_spinning = 1;
6220         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6221                                       ins, size);
6222         if (ret) {
6223                 btrfs_free_path(path);
6224                 return ret;
6225         }
6226
6227         leaf = path->nodes[0];
6228         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6229                                      struct btrfs_extent_item);
6230         btrfs_set_extent_refs(leaf, extent_item, 1);
6231         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6232         btrfs_set_extent_flags(leaf, extent_item,
6233                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6234         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6235
6236         btrfs_set_tree_block_key(leaf, block_info, key);
6237         btrfs_set_tree_block_level(leaf, block_info, level);
6238
6239         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6240         if (parent > 0) {
6241                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6242                 btrfs_set_extent_inline_ref_type(leaf, iref,
6243                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6244                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6245         } else {
6246                 btrfs_set_extent_inline_ref_type(leaf, iref,
6247                                                  BTRFS_TREE_BLOCK_REF_KEY);
6248                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6249         }
6250
6251         btrfs_mark_buffer_dirty(leaf);
6252         btrfs_free_path(path);
6253
6254         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6255         if (ret) { /* -ENOENT, logic error */
6256                 printk(KERN_ERR "btrfs update block group failed for %llu "
6257                        "%llu\n", (unsigned long long)ins->objectid,
6258                        (unsigned long long)ins->offset);
6259                 BUG();
6260         }
6261         return ret;
6262 }
6263
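/*
 * record a newly allocated, already reserved file extent by queueing a
 * delayed data ref; the extent item itself is inserted later, when the
 * delayed refs are run.
 */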
6264 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6265                                      struct btrfs_root *root,
6266                                      u64 root_objectid, u64 owner,
6267                                      u64 offset, struct btrfs_key *ins)
6268 {
6269         int ret;
6270
6271         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6272
6273         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6274                                          ins->offset, 0,
6275                                          root_objectid, owner, offset,
6276                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6277         return ret;
6278 }
6279
6280 /*
6281  * this is used by the tree logging recovery code.  It records that
6282  * an extent has been allocated and makes sure to clear the free
6283  * space cache bits as well
6284  */
6285 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6286                                    struct btrfs_root *root,
6287                                    u64 root_objectid, u64 owner, u64 offset,
6288                                    struct btrfs_key *ins)
6289 {
6290         int ret;
6291         struct btrfs_block_group_cache *block_group;
6292         struct btrfs_caching_control *caching_ctl;
6293         u64 start = ins->objectid;
6294         u64 num_bytes = ins->offset;
6295
6296         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6297         cache_block_group(block_group, 0);
6298         caching_ctl = get_caching_control(block_group);
6299
6300         if (!caching_ctl) {
6301                 BUG_ON(!block_group_cache_done(block_group));
6302                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6303                 BUG_ON(ret); /* -ENOMEM */
6304         } else {
6305                 mutex_lock(&caching_ctl->mutex);
6306
6307                 if (start >= caching_ctl->progress) {
6308                         ret = add_excluded_extent(root, start, num_bytes);
6309                         BUG_ON(ret); /* -ENOMEM */
6310                 } else if (start + num_bytes <= caching_ctl->progress) {
6311                         ret = btrfs_remove_free_space(block_group,
6312                                                       start, num_bytes);
6313                         BUG_ON(ret); /* -ENOMEM */
6314                 } else {
6315                         num_bytes = caching_ctl->progress - start;
6316                         ret = btrfs_remove_free_space(block_group,
6317                                                       start, num_bytes);
6318                         BUG_ON(ret); /* -ENOMEM */
6319
6320                         start = caching_ctl->progress;
6321                         num_bytes = ins->objectid + ins->offset -
6322                                     caching_ctl->progress;
6323                         ret = add_excluded_extent(root, start, num_bytes);
6324                         BUG_ON(ret); /* -ENOMEM */
6325                 }
6326
6327                 mutex_unlock(&caching_ctl->mutex);
6328                 put_caching_control(caching_ctl);
6329         }
6330
6331         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6332                                           RESERVE_ALLOC_NO_ACCOUNT);
6333         BUG_ON(ret); /* logic error */
6334         btrfs_put_block_group(block_group);
6335         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6336                                          0, owner, offset, ins, 1);
6337         return ret;
6338 }
6339
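/*
 * set up the extent buffer for a freshly allocated tree block: stamp
 * the generation, lock the buffer, clear any stale contents and mark
 * it dirty in the appropriate extent io tree.  Returns the buffer
 * locked for blocking, or ERR_PTR(-ENOMEM).
 */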
6340 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6341                                             struct btrfs_root *root,
6342                                             u64 bytenr, u32 blocksize,
6343                                             int level)
6344 {
6345         struct extent_buffer *buf;
6346
6347         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6348         if (!buf)
6349                 return ERR_PTR(-ENOMEM);
6350         btrfs_set_header_generation(buf, trans->transid);
6351         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6352         btrfs_tree_lock(buf);
6353         clean_tree_block(trans, root, buf);
6354         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6355
6356         btrfs_set_lock_blocking(buf);
6357         btrfs_set_buffer_uptodate(buf);
6358
6359         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6360                 /*
6361                  * we allow two log transactions at a time, use different
6362                  * EXTENT bits to differentiate dirty pages.
6363                  */
6364                 if (root->log_transid % 2 == 0)
6365                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6366                                         buf->start + buf->len - 1, GFP_NOFS);
6367                 else
6368                         set_extent_new(&root->dirty_log_pages, buf->start,
6369                                         buf->start + buf->len - 1, GFP_NOFS);
6370         } else {
6371                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6372                          buf->start + buf->len - 1, GFP_NOFS);
6373         }
6374         trans->blocks_used++;
6375         /* this returns a buffer locked for blocking */
6376         return buf;
6377 }
6378
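/*
 * pick the block reserve that will back a new tree block of the given
 * blocksize.  The root's own reserve is tried first; if it can't cover
 * the allocation we retry the reservation and finally fall back to the
 * global reserve, returning ERR_PTR(-ENOSPC) only when all of that
 * fails.
 */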
6379 static struct btrfs_block_rsv *
6380 use_block_rsv(struct btrfs_trans_handle *trans,
6381               struct btrfs_root *root, u32 blocksize)
6382 {
6383         struct btrfs_block_rsv *block_rsv;
6384         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6385         int ret;
6386
6387         block_rsv = get_block_rsv(trans, root);
6388
6389         if (block_rsv->size == 0) {
6390                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6391                                              BTRFS_RESERVE_NO_FLUSH);
6392                 /*
6393                  * If we couldn't reserve metadata bytes try and use some from
6394                  * the global reserve.
6395                  */
6396                 if (ret && block_rsv != global_rsv) {
6397                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6398                         if (!ret)
6399                                 return global_rsv;
6400                         return ERR_PTR(ret);
6401                 } else if (ret) {
6402                         return ERR_PTR(ret);
6403                 }
6404                 return block_rsv;
6405         }
6406
6407         ret = block_rsv_use_bytes(block_rsv, blocksize);
6408         if (!ret)
6409                 return block_rsv;
6410         if (ret && !block_rsv->failfast) {
6411                 if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6412                         static DEFINE_RATELIMIT_STATE(_rs,
6413                                         DEFAULT_RATELIMIT_INTERVAL * 10,
6414                                         /*DEFAULT_RATELIMIT_BURST*/ 1);
6415                         if (__ratelimit(&_rs))
6416                                 WARN(1, KERN_DEBUG
6417                                         "btrfs: block rsv returned %d\n", ret);
6418                 }
6419                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6420                                              BTRFS_RESERVE_NO_FLUSH);
6421                 if (!ret) {
6422                         return block_rsv;
6423                 } else if (ret && block_rsv != global_rsv) {
6424                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6425                         if (!ret)
6426                                 return global_rsv;
6427                 }
6428         }
6429
6430         return ERR_PTR(-ENOSPC);
6431 }
6432
6433 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6434                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6435 {
6436         block_rsv_add_bytes(block_rsv, blocksize, 0);
6437         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6438 }
6439
6440 /*
6441  * finds a free extent and does all the dirty work required for allocation.
6442  * Returns the key for the extent through ins, and a tree buffer for
6443  * the first block of the extent through buf.
6444  *
6445  * returns the tree buffer or an ERR_PTR on error.
6446  */
6447 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6448                                         struct btrfs_root *root, u32 blocksize,
6449                                         u64 parent, u64 root_objectid,
6450                                         struct btrfs_disk_key *key, int level,
6451                                         u64 hint, u64 empty_size)
6452 {
6453         struct btrfs_key ins;
6454         struct btrfs_block_rsv *block_rsv;
6455         struct extent_buffer *buf;
6456         u64 flags = 0;
6457         int ret;
6458
6460         block_rsv = use_block_rsv(trans, root, blocksize);
6461         if (IS_ERR(block_rsv))
6462                 return ERR_CAST(block_rsv);
6463
6464         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6465                                    empty_size, hint, &ins, 0);
6466         if (ret) {
6467                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6468                 return ERR_PTR(ret);
6469         }
6470
6471         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6472                                     blocksize, level);
6473         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6474
6475         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6476                 if (parent == 0)
6477                         parent = ins.objectid;
6478                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6479         } else
6480                 BUG_ON(parent > 0);
6481
6482         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6483                 struct btrfs_delayed_extent_op *extent_op;
6484                 extent_op = btrfs_alloc_delayed_extent_op();
6485                 BUG_ON(!extent_op); /* -ENOMEM */
6486                 if (key)
6487                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
6488                 else
6489                         memset(&extent_op->key, 0, sizeof(extent_op->key));
6490                 extent_op->flags_to_set = flags;
6491                 extent_op->update_key = 1;
6492                 extent_op->update_flags = 1;
6493                 extent_op->is_data = 0;
6494
6495                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6496                                         ins.objectid,
6497                                         ins.offset, parent, root_objectid,
6498                                         level, BTRFS_ADD_DELAYED_EXTENT,
6499                                         extent_op, 0);
6500                 BUG_ON(ret); /* -ENOMEM */
6501         }
6502         return buf;
6503 }
6504
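/*
 * state carried while walking down and back up a subtree in order to
 * drop it.  refs[] and flags[] cache the reference count and extent
 * flags of the block at each level; stage is either DROP_REFERENCE or
 * UPDATE_BACKREF, and update_progress remembers how far the
 * UPDATE_BACKREF pass has come.
 */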
6505 struct walk_control {
6506         u64 refs[BTRFS_MAX_LEVEL];
6507         u64 flags[BTRFS_MAX_LEVEL];
6508         struct btrfs_key update_progress;
6509         int stage;
6510         int level;
6511         int shared_level;
6512         int update_ref;
6513         int keep_locks;
6514         int reada_slot;
6515         int reada_count;
6516         int for_reloc;
6517 };
6518
6519 #define DROP_REFERENCE  1
6520 #define UPDATE_BACKREF  2
6521
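/*
 * read ahead the tree blocks referenced by the node at wc->level,
 * skipping pointers the walk will obviously not descend into.  The
 * readahead window adapts: it shrinks to 2/3 (floor 2) while earlier
 * prefetches are still unconsumed, and grows by 3/2 (capped at the
 * number of pointers per block) once the walk catches up.
 */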
6522 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6523                                      struct btrfs_root *root,
6524                                      struct walk_control *wc,
6525                                      struct btrfs_path *path)
6526 {
6527         u64 bytenr;
6528         u64 generation;
6529         u64 refs;
6530         u64 flags;
6531         u32 nritems;
6532         u32 blocksize;
6533         struct btrfs_key key;
6534         struct extent_buffer *eb;
6535         int ret;
6536         int slot;
6537         int nread = 0;
6538
6539         if (path->slots[wc->level] < wc->reada_slot) {
6540                 wc->reada_count = wc->reada_count * 2 / 3;
6541                 wc->reada_count = max(wc->reada_count, 2);
6542         } else {
6543                 wc->reada_count = wc->reada_count * 3 / 2;
6544                 wc->reada_count = min_t(int, wc->reada_count,
6545                                         BTRFS_NODEPTRS_PER_BLOCK(root));
6546         }
6547
6548         eb = path->nodes[wc->level];
6549         nritems = btrfs_header_nritems(eb);
6550         blocksize = btrfs_level_size(root, wc->level - 1);
6551
6552         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6553                 if (nread >= wc->reada_count)
6554                         break;
6555
6556                 cond_resched();
6557                 bytenr = btrfs_node_blockptr(eb, slot);
6558                 generation = btrfs_node_ptr_generation(eb, slot);
6559
6560                 if (slot == path->slots[wc->level])
6561                         goto reada;
6562
6563                 if (wc->stage == UPDATE_BACKREF &&
6564                     generation <= root->root_key.offset)
6565                         continue;
6566
6567                 /* We don't lock the tree block, it's OK to be racy here */
6568                 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6569                                                &refs, &flags);
6570                 /* We don't care about errors in readahead. */
6571                 if (ret < 0)
6572                         continue;
6573                 BUG_ON(refs == 0);
6574
6575                 if (wc->stage == DROP_REFERENCE) {
6576                         if (refs == 1)
6577                                 goto reada;
6578
6579                         if (wc->level == 1 &&
6580                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6581                                 continue;
6582                         if (!wc->update_ref ||
6583                             generation <= root->root_key.offset)
6584                                 continue;
6585                         btrfs_node_key_to_cpu(eb, &key, slot);
6586                         ret = btrfs_comp_cpu_keys(&key,
6587                                                   &wc->update_progress);
6588                         if (ret < 0)
6589                                 continue;
6590                 } else {
6591                         if (wc->level == 1 &&
6592                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6593                                 continue;
6594                 }
6595 reada:
6596                 ret = readahead_tree_block(root, bytenr, blocksize,
6597                                            generation);
6598                 if (ret)
6599                         break;
6600                 nread++;
6601         }
6602         wc->reada_slot = slot;
6603 }
6604
6605 /*
6606  * helper to process a tree block while walking down the tree.
6607  *
6608  * when wc->stage == UPDATE_BACKREF, this function updates
6609  * back refs for pointers in the block.
6610  *
6611  * NOTE: return value 1 means we should stop walking down.
6612  */
6613 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6614                                    struct btrfs_root *root,
6615                                    struct btrfs_path *path,
6616                                    struct walk_control *wc, int lookup_info)
6617 {
6618         int level = wc->level;
6619         struct extent_buffer *eb = path->nodes[level];
6620         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6621         int ret;
6622
6623         if (wc->stage == UPDATE_BACKREF &&
6624             btrfs_header_owner(eb) != root->root_key.objectid)
6625                 return 1;
6626
6627         /*
6628          * when the reference count of a tree block is 1, it won't increase
6629          * again. once the full backref flag is set, we never clear it.
6630          */
6631         if (lookup_info &&
6632             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6633              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
6634                 BUG_ON(!path->locks[level]);
6635                 ret = btrfs_lookup_extent_info(trans, root,
6636                                                eb->start, eb->len,
6637                                                &wc->refs[level],
6638                                                &wc->flags[level]);
6639                 BUG_ON(ret == -ENOMEM);
6640                 if (ret)
6641                         return ret;
6642                 BUG_ON(wc->refs[level] == 0);
6643         }
6644
6645         if (wc->stage == DROP_REFERENCE) {
6646                 if (wc->refs[level] > 1)
6647                         return 1;
6648
6649                 if (path->locks[level] && !wc->keep_locks) {
6650                         btrfs_tree_unlock_rw(eb, path->locks[level]);
6651                         path->locks[level] = 0;
6652                 }
6653                 return 0;
6654         }
6655
6656         /* wc->stage == UPDATE_BACKREF */
6657         if (!(wc->flags[level] & flag)) {
6658                 BUG_ON(!path->locks[level]);
6659                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6660                 BUG_ON(ret); /* -ENOMEM */
6661                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6662                 BUG_ON(ret); /* -ENOMEM */
6663                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6664                                                   eb->len, flag, 0);
6665                 BUG_ON(ret); /* -ENOMEM */
6666                 wc->flags[level] |= flag;
6667         }
6668
6669         /*
6670          * the block is shared by multiple trees, so it's not good to
6671          * keep the tree lock
6672          */
6673         if (path->locks[level] && level > 0) {
6674                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6675                 path->locks[level] = 0;
6676         }
6677         return 0;
6678 }
6679
6680 /*
6681  * helper to process a tree block pointer.
6682  *
6683  * when wc->stage == DROP_REFERENCE, this function checks the
6684  * reference count of the block pointed to. if the block
6685  * is shared and we need to update back refs for the subtree
6686  * rooted at the block, this function changes wc->stage to
6687  * UPDATE_BACKREF. if the block is shared and there is no
6688  * need to update back refs, this function drops the reference
6689  * to the block.
6690  *
6691  * NOTE: return value 1 means we should stop walking down.
6692  */
6693 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6694                                  struct btrfs_root *root,
6695                                  struct btrfs_path *path,
6696                                  struct walk_control *wc, int *lookup_info)
6697 {
6698         u64 bytenr;
6699         u64 generation;
6700         u64 parent;
6701         u32 blocksize;
6702         struct btrfs_key key;
6703         struct extent_buffer *next;
6704         int level = wc->level;
6705         int reada = 0;
6706         int ret = 0;
6707
6708         generation = btrfs_node_ptr_generation(path->nodes[level],
6709                                                path->slots[level]);
6710         /*
6711          * if the lower level block was created before the snapshot
6712          * was created, we know there is no need to update back refs
6713          * for the subtree
6714          */
6715         if (wc->stage == UPDATE_BACKREF &&
6716             generation <= root->root_key.offset) {
6717                 *lookup_info = 1;
6718                 return 1;
6719         }
6720
6721         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6722         blocksize = btrfs_level_size(root, level - 1);
6723
6724         next = btrfs_find_tree_block(root, bytenr, blocksize);
6725         if (!next) {
6726                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
6727                 if (!next)
6728                         return -ENOMEM;
6729                 reada = 1;
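                     /* block was not cached; readahead its siblings below */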
6730         }
6731         btrfs_tree_lock(next);
6732         btrfs_set_lock_blocking(next);
6733
6734         ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6735                                        &wc->refs[level - 1],
6736                                        &wc->flags[level - 1]);
6737         if (ret < 0) {
6738                 btrfs_tree_unlock(next);
6739                 return ret;
6740         }
6741
6742         BUG_ON(wc->refs[level - 1] == 0);
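             /*
              * refs/flags for the child are now cached in wc, so
              * walk_down_proc() need not repeat the extent lookup
              */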
6743         *lookup_info = 0;
6744
6745         if (wc->stage == DROP_REFERENCE) {
6746                 if (wc->refs[level - 1] > 1) {
6747                         if (level == 1 &&
6748                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6749                                 goto skip;
6750
6751                         if (!wc->update_ref ||
6752                             generation <= root->root_key.offset)
6753                                 goto skip;
6754
6755                         btrfs_node_key_to_cpu(path->nodes[level], &key,
6756                                               path->slots[level]);
6757                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6758                         if (ret < 0)
6759                                 goto skip;
6760
6761                         wc->stage = UPDATE_BACKREF;
6762                         wc->shared_level = level - 1;
6763                 }
6764         } else {
6765                 if (level == 1 &&
6766                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6767                         goto skip;
6768         }
6769
6770         if (!btrfs_buffer_uptodate(next, generation, 0)) {
6771                 btrfs_tree_unlock(next);
6772                 free_extent_buffer(next);
6773                 next = NULL;
6774                 *lookup_info = 1;
6775         }
6776
6777         if (!next) {
6778                 if (reada && level == 1)
6779                         reada_walk_down(trans, root, wc, path);
6780                 next = read_tree_block(root, bytenr, blocksize, generation);
6781                 if (!next)
6782                         return -EIO;
6783                 btrfs_tree_lock(next);
6784                 btrfs_set_lock_blocking(next);
6785         }
6786
6787         level--;
6788         BUG_ON(level != btrfs_header_level(next));
6789         path->nodes[level] = next;
6790         path->slots[level] = 0;
6791         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6792         wc->level = level;
6793         if (wc->level == 1)
6794                 wc->reada_slot = 0;
6795         return 0;
6796 skip:
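             /*
              * we are not descending into 'next'; in the DROP_REFERENCE
              * stage the reference this tree holds on the child block is
              * dropped below instead
              */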
6797         wc->refs[level - 1] = 0;
6798         wc->flags[level - 1] = 0;
6799         if (wc->stage == DROP_REFERENCE) {
6800                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6801                         parent = path->nodes[level]->start;
6802                 } else {
6803                         BUG_ON(root->root_key.objectid !=
6804                                btrfs_header_owner(path->nodes[level]));
6805                         parent = 0;
6806                 }
6807
6808                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6809                                 root->root_key.objectid, level - 1, 0, 0);
6810                 BUG_ON(ret); /* -ENOMEM */
6811         }
6812         btrfs_tree_unlock(next);
6813         free_extent_buffer(next);
6814         *lookup_info = 1;
6815         return 1;
6816 }
6817
6818 /*
6819  * helper to process a tree block while walking up the tree.
6820  *
6821  * when wc->stage == DROP_REFERENCE, this function drops
6822  * reference count on the block.
6823  *
6824  * when wc->stage == UPDATE_BACKREF, this function changes
6825  * wc->stage back to DROP_REFERENCE if we changed wc->stage
6826  * to UPDATE_BACKREF previously while processing the block.
6827  *
6828  * NOTE: return value 1 means we should stop walking up.
6829  */
6830 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6831                                  struct btrfs_root *root,
6832                                  struct btrfs_path *path,
6833                                  struct walk_control *wc)
6834 {
6835         int ret;
6836         int level = wc->level;
6837         struct extent_buffer *eb = path->nodes[level];
6838         u64 parent = 0;
6839
6840         if (wc->stage == UPDATE_BACKREF) {
6841                 BUG_ON(wc->shared_level < level);
6842                 if (level < wc->shared_level)
6843                         goto out;
6844
6845                 ret = find_next_key(path, level + 1, &wc->update_progress);
6846                 if (ret > 0)
6847                         wc->update_ref = 0;
6848
6849                 wc->stage = DROP_REFERENCE;
6850                 wc->shared_level = -1;
6851                 path->slots[level] = 0;
6852
6853                 /*
6854                  * check the reference count again if the block isn't
6855                  * locked. we should start walking down the tree again
6856                  * if the reference count is one.
6857                  */
6858                 if (!path->locks[level]) {
6859                         BUG_ON(level == 0);
6860                         btrfs_tree_lock(eb);
6861                         btrfs_set_lock_blocking(eb);
6862                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6863
6864                         ret = btrfs_lookup_extent_info(trans, root,
6865                                                        eb->start, eb->len,
6866                                                        &wc->refs[level],
6867                                                        &wc->flags[level]);
6868                         if (ret < 0) {
6869                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6870                                 path->locks[level] = 0;
6871                                 return ret;
6872                         }
6873                         BUG_ON(wc->refs[level] == 0);
6874                         if (wc->refs[level] == 1) {
6875                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6876                                 path->locks[level] = 0;
6877                                 return 1;
6878                         }
6879                 }
6880         }
6881
6882         /* wc->stage == DROP_REFERENCE */
6883         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6884
6885         if (wc->refs[level] == 1) {
6886                 if (level == 0) {
6887                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6888                                 ret = btrfs_dec_ref(trans, root, eb, 1,
6889                                                     wc->for_reloc);
6890                         else
6891                                 ret = btrfs_dec_ref(trans, root, eb, 0,
6892                                                     wc->for_reloc);
6893                         BUG_ON(ret); /* -ENOMEM */
6894                 }
6895                 /* make block locked assertion in clean_tree_block happy */
6896                 if (!path->locks[level] &&
6897                     btrfs_header_generation(eb) == trans->transid) {
6898                         btrfs_tree_lock(eb);
6899                         btrfs_set_lock_blocking(eb);
6900                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6901                 }
6902                 clean_tree_block(trans, root, eb);
6903         }
6904
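             /*
              * work out how this block is referenced, so the free below can
              * match its backref: by parent bytenr when the full backref
              * flag is set, by root objectid otherwise
              */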
6905         if (eb == root->node) {
6906                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6907                         parent = eb->start;
6908                 else
6909                         BUG_ON(root->root_key.objectid !=
6910                                btrfs_header_owner(eb));
6911         } else {
6912                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6913                         parent = path->nodes[level + 1]->start;
6914                 else
6915                         BUG_ON(root->root_key.objectid !=
6916                                btrfs_header_owner(path->nodes[level + 1]));
6917         }
6918
6919         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
6920 out:
6921         wc->refs[level] = 0;
6922         wc->flags[level] = 0;
6923         return 0;
6924 }
6925
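     /*
      * walk down the tree from wc->level, handling each block with
      * walk_down_proc() and descending through node pointers with
      * do_walk_down(), until we reach a leaf, hit a shared block, or
      * run out of slots in the current node.
      */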
6926 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6927                                    struct btrfs_root *root,
6928                                    struct btrfs_path *path,
6929                                    struct walk_control *wc)
6930 {
6931         int level = wc->level;
6932         int lookup_info = 1;
6933         int ret;
6934
6935         while (level >= 0) {
6936                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
6937                 if (ret > 0)
6938                         break;
6939
6940                 if (level == 0)
6941                         break;
6942
6943                 if (path->slots[level] >=
6944                     btrfs_header_nritems(path->nodes[level]))
6945                         break;
6946
6947                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
6948                 if (ret > 0) {
6949                         path->slots[level]++;
6950                         continue;
6951                 } else if (ret < 0)
6952                         return ret;
6953                 level = wc->level;
6954         }
6955         return 0;
6956 }
6957
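     /*
      * walk back up the tree, dropping and releasing fully-processed
      * nodes via walk_up_proc().  returns 0 when the walk should head
      * back down, 1 when everything up to max_level is finished.
      */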
6958 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6959                                  struct btrfs_root *root,
6960                                  struct btrfs_path *path,
6961                                  struct walk_control *wc, int max_level)
6962 {
6963         int level = wc->level;
6964         int ret;
6965
6966         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6967         while (level < max_level && path->nodes[level]) {
6968                 wc->level = level;
6969                 if (path->slots[level] + 1 <
6970                     btrfs_header_nritems(path->nodes[level])) {
6971                         path->slots[level]++;
6972                         return 0;
6973                 } else {
6974                         ret = walk_up_proc(trans, root, path, wc);
6975                         if (ret > 0)
6976                                 return 0;
6977
6978                         if (path->locks[level]) {
6979                                 btrfs_tree_unlock_rw(path->nodes[level],
6980                                                      path->locks[level]);
6981                                 path->locks[level] = 0;
6982                         }
6983                         free_extent_buffer(path->nodes[level]);
6984                         path->nodes[level] = NULL;
6985                         level++;
6986                 }
6987         }
6988         return 1;
6989 }
6990
6991 /*
6992  * drop a subvolume tree.
6993  *
6994  * this function traverses the tree, freeing any blocks that are
6995  * only referenced by the tree.
6996  *
6997  * when a shared tree block is found, this function decreases its
6998  * reference count by one. if update_ref is true, this function
6999  * also makes sure backrefs for the shared block and all lower level
7000  * blocks are properly updated.
7001  */
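     /*
      * Illustrative sketch of a caller, not a verbatim copy: the cleaner
      * thread drops dead roots roughly like
      *
      *      if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV)
      *              btrfs_drop_snapshot(root, NULL, 0, 0);
      *      else
      *              btrfs_drop_snapshot(root, NULL, 1, 0);
      *
      * i.e. update_ref is only set for roots that use the mixed backref
      * format, where backrefs of shared blocks may need rewriting.
      */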
7002 int btrfs_drop_snapshot(struct btrfs_root *root,
7003                          struct btrfs_block_rsv *block_rsv, int update_ref,
7004                          int for_reloc)
7005 {
7006         struct btrfs_path *path;
7007         struct btrfs_trans_handle *trans;
7008         struct btrfs_root *tree_root = root->fs_info->tree_root;
7009         struct btrfs_root_item *root_item = &root->root_item;
7010         struct walk_control *wc;
7011         struct btrfs_key key;
7012         int err = 0;
7013         int ret;
7014         int level;
7015
7016         path = btrfs_alloc_path();
7017         if (!path) {
7018                 err = -ENOMEM;
7019                 goto out;
7020         }
7021
7022         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7023         if (!wc) {
7024                 btrfs_free_path(path);
7025                 err = -ENOMEM;
7026                 goto out;
7027         }
7028
7029         trans = btrfs_start_transaction(tree_root, 0);
7030         if (IS_ERR(trans)) {
7031                 err = PTR_ERR(trans);
7032                 goto out_free;
7033         }
7034
7035         if (block_rsv)
7036                 trans->block_rsv = block_rsv;
7037
7038         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7039                 level = btrfs_header_level(root->node);
7040                 path->nodes[level] = btrfs_lock_root_node(root);
7041                 btrfs_set_lock_blocking(path->nodes[level]);
7042                 path->slots[level] = 0;
7043                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7044                 memset(&wc->update_progress, 0,
7045                        sizeof(wc->update_progress));
7046         } else {
7047                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7048                 memcpy(&wc->update_progress, &key,
7049                        sizeof(wc->update_progress));
7050
7051                 level = root_item->drop_level;
7052                 BUG_ON(level == 0);
7053                 path->lowest_level = level;
7054                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7055                 path->lowest_level = 0;
7056                 if (ret < 0) {
7057                         err = ret;
7058                         goto out_end_trans;
7059                 }
7060                 WARN_ON(ret > 0);
7061
7062                 /*
7063                  * unlock our path; this is safe because only this
7064                  * function is allowed to delete this snapshot
7065                  */
7066                 btrfs_unlock_up_safe(path, 0);
7067
7068                 level = btrfs_header_level(root->node);
7069                 while (1) {
7070                         btrfs_tree_lock(path->nodes[level]);
7071                         btrfs_set_lock_blocking(path->nodes[level]);
7072
7073                         ret = btrfs_lookup_extent_info(trans, root,
7074                                                 path->nodes[level]->start,
7075                                                 path->nodes[level]->len,
7076                                                 &wc->refs[level],
7077                                                 &wc->flags[level]);
7078                         if (ret < 0) {
7079                                 err = ret;
7080                                 goto out_end_trans;
7081                         }
7082                         BUG_ON(wc->refs[level] == 0);
7083
7084                         if (level == root_item->drop_level)
7085                                 break;
7086
7087                         btrfs_tree_unlock(path->nodes[level]);
7088                         WARN_ON(wc->refs[level] != 1);
7089                         level--;
7090                 }
7091         }
7092
7093         wc->level = level;
7094         wc->shared_level = -1;
7095         wc->stage = DROP_REFERENCE;
7096         wc->update_ref = update_ref;
7097         wc->keep_locks = 0;
7098         wc->for_reloc = for_reloc;
7099         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7100
7101         while (1) {
7102                 ret = walk_down_tree(trans, root, path, wc);
7103                 if (ret < 0) {
7104                         err = ret;
7105                         break;
7106                 }
7107
7108                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7109                 if (ret < 0) {
7110                         err = ret;
7111                         break;
7112                 }
7113
7114                 if (ret > 0) {
7115                         BUG_ON(wc->stage != DROP_REFERENCE);
7116                         break;
7117                 }
7118
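                     /*
                      * record how far we got: drop_progress/drop_level let a
                      * half-finished drop resume after a crash or remount (see
                      * the resume path near the top of this function)
                      */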
7119                 if (wc->stage == DROP_REFERENCE) {
7120                         level = wc->level;
7121                         btrfs_node_key(path->nodes[level],
7122                                        &root_item->drop_progress,
7123                                        path->slots[level]);
7124                         root_item->drop_level = level;
7125                 }
7126
7127                 BUG_ON(wc->level == 0);
7128                 if (btrfs_should_end_transaction(trans, tree_root)) {
7129                         ret = btrfs_update_root(trans, tree_root,
7130                                                 &root->root_key,
7131                                                 root_item);
7132                         if (ret) {
7133                                 btrfs_abort_transaction(trans, tree_root, ret);
7134                                 err = ret;
7135                                 goto out_end_trans;
7136                         }
7137
7138                         btrfs_end_transaction_throttle(trans, tree_root);
7139                         trans = btrfs_start_transaction(tree_root, 0);
7140                         if (IS_ERR(trans)) {
7141                                 err = PTR_ERR(trans);
7142                                 goto out_free;
7143                         }
7144                         if (block_rsv)
7145                                 trans->block_rsv = block_rsv;
7146                 }
7147         }
7148         btrfs_release_path(path);
7149         if (err)
7150                 goto out_end_trans;
7151
7152         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7153         if (ret) {
7154                 btrfs_abort_transaction(trans, tree_root, ret);
7155                 goto out_end_trans;
7156         }
7157
7158         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7159                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
7160                                            NULL, NULL);
7161                 if (ret < 0) {
7162                         btrfs_abort_transaction(trans, tree_root, ret);
7163                         err = ret;
7164                         goto out_end_trans;
7165                 } else if (ret > 0) {
7166                         /* if we fail to delete the orphan item this time
7167                          * around, it'll get picked up the next time.
7168                          *
7169                          * The most common failure here is just -ENOENT.
7170                          */
7171                         btrfs_del_orphan_item(trans, tree_root,
7172                                               root->root_key.objectid);
7173                 }
7174         }
7175
7176         if (root->in_radix) {
7177                 btrfs_free_fs_root(tree_root->fs_info, root);
7178         } else {
7179                 free_extent_buffer(root->node);
7180                 free_extent_buffer(root->commit_root);
7181                 kfree(root);
7182         }
7183 out_end_trans:
7184         btrfs_end_transaction_throttle(trans, tree_root);
7185 out_free:
7186         kfree(wc);
7187         btrfs_free_path(path);
7188 out:
7189         if (err)
7190                 btrfs_std_error(root->fs_info, err);
7191         return err;
7192 }
7193
7194 /*
7195  * drop subtree rooted at tree block 'node'.
7196  *
7197  * NOTE: this function will unlock and release tree block 'node'.
7198  * it is only used by relocation code.
7199  */
7200 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7201                         struct btrfs_root *root,
7202                         struct extent_buffer *node,
7203                         struct extent_buffer *parent)
7204 {
7205         struct btrfs_path *path;
7206         struct walk_control *wc;
7207         int level;
7208         int parent_level;
7209         int ret = 0;
7210         int wret;
7211
7212         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7213
7214         path = btrfs_alloc_path();
7215         if (!path)
7216                 return -ENOMEM;
7217
7218         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7219         if (!wc) {
7220                 btrfs_free_path(path);
7221                 return -ENOMEM;
7222         }
7223
7224         btrfs_assert_tree_locked(parent);
7225         parent_level = btrfs_header_level(parent);
7226         extent_buffer_get(parent);
7227         path->nodes[parent_level] = parent;
7228         path->slots[parent_level] = btrfs_header_nritems(parent);
7229
7230         btrfs_assert_tree_locked(node);
7231         level = btrfs_header_level(node);
7232         path->nodes[level] = node;
7233         path->slots[level] = 0;
7234         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7235
7236         wc->refs[parent_level] = 1;
7237         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7238         wc->level = level;
7239         wc->shared_level = -1;
7240         wc->stage = DROP_REFERENCE;
7241         wc->update_ref = 0;
7242         wc->keep_locks = 1;
7243         wc->for_reloc = 1;
7244         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7245
7246         while (1) {
7247                 wret = walk_down_tree(trans, root, path, wc);
7248                 if (wret < 0) {
7249                         ret = wret;
7250                         break;
7251                 }
7252
7253                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7254                 if (wret < 0)
7255                         ret = wret;
7256                 if (wret != 0)
7257                         break;
7258         }
7259
7260         kfree(wc);
7261         btrfs_free_path(path);
7262         return ret;
7263 }
7264
7265 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7266 {
7267         u64 num_devices;
7268         u64 stripped;
7269
7270         /*
7271          * if restripe for this chunk_type is on, pick the target profile
7272          * and return it, otherwise do the usual balance
7273          */
7274         stripped = get_restripe_target(root->fs_info, flags);
7275         if (stripped)
7276                 return extended_to_chunk(stripped);
7277
7278         /*
7279          * we add in the count of missing devices because we want
7280          * to make sure that any RAID levels on a degraded FS
7281          * continue to be honored.
7282          */
7283         num_devices = root->fs_info->fs_devices->rw_devices +
7284                 root->fs_info->fs_devices->missing_devices;
7285
7286         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7287                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7288
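             /*
              * e.g. on a filesystem degraded to a single rw device, RAID1
              * and RAID10 are rewritten as DUP and RAID0 as single; with
              * more devices available, DUP is upgraded to RAID1
              */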
7289         if (num_devices == 1) {
7290                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7291                 stripped = flags & ~stripped;
7292
7293                 /* turn raid0 into single device chunks */
7294                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7295                         return stripped;
7296
7297                 /* turn mirroring into duplication */
7298                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7299                              BTRFS_BLOCK_GROUP_RAID10))
7300                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7301         } else {
7302                 /* they already had raid on here, just return */
7303                 if (flags & stripped)
7304                         return flags;
7305
7306                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7307                 stripped = flags & ~stripped;
7308
7309                 /* switch duplicated blocks with raid1 */
7310                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7311                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7312
7313                 /* this is drive concat, leave it alone */
7314         }
7315
7316         return flags;
7317 }
7318
7319 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7320 {
7321         struct btrfs_space_info *sinfo = cache->space_info;
7322         u64 num_bytes;
7323         u64 min_allocable_bytes;
7324         int ret = -ENOSPC;
7325
7327         /*
7328          * We need some metadata space and system metadata space for
7329          * allocating chunks in some corner cases, so keep a minimum of
7330          * allocatable space free unless we are forced to go readonly.
7331          */
7332         if ((sinfo->flags &
7333              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7334             !force)
7335                 min_allocable_bytes = 1 * 1024 * 1024;
7336         else
7337                 min_allocable_bytes = 0;
7338
7339         spin_lock(&sinfo->lock);
7340         spin_lock(&cache->lock);
7341
7342         if (cache->ro) {
7343                 ret = 0;
7344                 goto out;
7345         }
7346
7347         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7348                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7349
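             /*
              * num_bytes is the free space left in this group; it becomes
              * unusable for allocation once the group is read-only, so only
              * proceed if the rest of the space_info can absorb it
              */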
7350         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7351             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7352             min_allocable_bytes <= sinfo->total_bytes) {
7353                 sinfo->bytes_readonly += num_bytes;
7354                 cache->ro = 1;
7355                 ret = 0;
7356         }
7357 out:
7358         spin_unlock(&cache->lock);
7359         spin_unlock(&sinfo->lock);
7360         return ret;
7361 }
7362
7363 int btrfs_set_block_group_ro(struct btrfs_root *root,
7364                              struct btrfs_block_group_cache *cache)
7366 {
7367         struct btrfs_trans_handle *trans;
7368         u64 alloc_flags;
7369         int ret;
7370
7371         BUG_ON(cache->ro);
7372
7373         trans = btrfs_join_transaction(root);
7374         if (IS_ERR(trans))
7375                 return PTR_ERR(trans);
7376
7377         alloc_flags = update_block_group_flags(root, cache->flags);
7378         if (alloc_flags != cache->flags) {
7379                 ret = do_chunk_alloc(trans, root, alloc_flags,
7380                                      CHUNK_ALLOC_FORCE);
7381                 if (ret < 0)
7382                         goto out;
7383         }
7384
7385         ret = set_block_group_ro(cache, 0);
7386         if (!ret)
7387                 goto out;
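             /*
              * there wasn't enough free space to mark the group read-only;
              * force a new chunk allocation to replace the space this group
              * provides, then retry
              */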
7388         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7389         ret = do_chunk_alloc(trans, root, alloc_flags,
7390                              CHUNK_ALLOC_FORCE);
7391         if (ret < 0)
7392                 goto out;
7393         ret = set_block_group_ro(cache, 0);
7394 out:
7395         btrfs_end_transaction(trans, root);
7396         return ret;
7397 }
7398
7399 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7400                             struct btrfs_root *root, u64 type)
7401 {
7402         u64 alloc_flags = get_alloc_profile(root, type);
7403         return do_chunk_alloc(trans, root, alloc_flags,
7404                               CHUNK_ALLOC_FORCE);
7405 }
7406
7407 /*
7408  * helper to account the unused space of all the readonly block groups in
7409  * the list. takes mirrors into account.
7410  */
7411 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7412 {
7413         struct btrfs_block_group_cache *block_group;
7414         u64 free_bytes = 0;
7415         int factor;
7416
7417         list_for_each_entry(block_group, groups_list, list) {
7418                 spin_lock(&block_group->lock);
7419
7420                 if (!block_group->ro) {
7421                         spin_unlock(&block_group->lock);
7422                         continue;
7423                 }
7424
7425                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7426                                           BTRFS_BLOCK_GROUP_RAID10 |
7427                                           BTRFS_BLOCK_GROUP_DUP))
7428                         factor = 2;
7429                 else
7430                         factor = 1;
7431
7432                 free_bytes += (block_group->key.offset -
7433                                btrfs_block_group_used(&block_group->item)) *
7434                                factor;
7435
7436                 spin_unlock(&block_group->lock);
7437         }
7438
7439         return free_bytes;
7440 }
7441
7442 /*
7443  * helper to account the unused space of all the readonly block groups in
7444  * the space_info. takes mirrors into account.
7445  */
7446 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7447 {
7448         int i;
7449         u64 free_bytes = 0;
7450
7451         spin_lock(&sinfo->lock);
7452
7453         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7454                 if (!list_empty(&sinfo->block_groups[i]))
7455                         free_bytes += __btrfs_get_ro_block_group_free_space(
7456                                                 &sinfo->block_groups[i]);
7457
7458         spin_unlock(&sinfo->lock);
7459
7460         return free_bytes;
7461 }
7462
7463 void btrfs_set_block_group_rw(struct btrfs_root *root,
7464                               struct btrfs_block_group_cache *cache)
7465 {
7466         struct btrfs_space_info *sinfo = cache->space_info;
7467         u64 num_bytes;
7468
7469         BUG_ON(!cache->ro);
7470
7471         spin_lock(&sinfo->lock);
7472         spin_lock(&cache->lock);
7473         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7474                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7475         sinfo->bytes_readonly -= num_bytes;
7476         cache->ro = 0;
7477         spin_unlock(&cache->lock);
7478         spin_unlock(&sinfo->lock);
7479 }
7480
7481 /*
7482  * checks to see if it's even possible to relocate this block group.
7483  *
7484  * @return - -1 if it's not a good idea to relocate this block group, 0 if
7485  * it's ok to go ahead and try.
7486  */
7487 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7488 {
7489         struct btrfs_block_group_cache *block_group;
7490         struct btrfs_space_info *space_info;
7491         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7492         struct btrfs_device *device;
7493         u64 min_free;
7494         u64 dev_min = 1;
7495         u64 dev_nr = 0;
7496         u64 target;
7497         int index;
7498         int full = 0;
7499         int ret = 0;
7500
7501         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7502
7503         /* odd, couldn't find the block group, leave it alone */
7504         if (!block_group)
7505                 return -1;
7506
7507         min_free = btrfs_block_group_used(&block_group->item);
7508
7509         /* no bytes used, we're good */
7510         if (!min_free)
7511                 goto out;
7512
7513         space_info = block_group->space_info;
7514         spin_lock(&space_info->lock);
7515
7516         full = space_info->full;
7517
7518         /*
7519          * if this is the last block group we have in this space, we can't
7520          * relocate it unless we're able to allocate a new chunk below.
7521          *
7522          * Otherwise, we need to make sure we have room in the space to handle
7523          * all of the extents from this block group.  If we can, we're good
7524          */
7525         if ((space_info->total_bytes != block_group->key.offset) &&
7526             (space_info->bytes_used + space_info->bytes_reserved +
7527              space_info->bytes_pinned + space_info->bytes_readonly +
7528              min_free < space_info->total_bytes)) {
7529                 spin_unlock(&space_info->lock);
7530                 goto out;
7531         }
7532         spin_unlock(&space_info->lock);
7533
7534         /*
7535          * ok we don't have enough space, but maybe we have free space on our
7536          * devices to allocate new chunks for relocation, so loop through our
7537          * alloc devices and guess if we have enough space.  if this block
7538          * group is going to be restriped, run checks against the target
7539          * profile instead of the current one.
7540          */
7541         ret = -1;
7542
7543         /*
7544          * index:
7545          *      0: raid10
7546          *      1: raid1
7547          *      2: dup
7548          *      3: raid0
7549          *      4: single
7550          */
7551         target = get_restripe_target(root->fs_info, block_group->flags);
7552         if (target) {
7553                 index = __get_raid_index(extended_to_chunk(target));
7554         } else {
7555                 /*
7556                  * this is just a balance, so if we were marked as full
7557                  * we know there is no space for a new chunk
7558                  */
7559                 if (full)
7560                         goto out;
7561
7562                 index = get_block_group_index(block_group);
7563         }
7564
7565         if (index == BTRFS_RAID_RAID10) {
7566                 dev_min = 4;
7567                 /* Divide by 2 */
7568                 min_free >>= 1;
7569         } else if (index == BTRFS_RAID_RAID1) {
7570                 dev_min = 2;
7571         } else if (index == BTRFS_RAID_DUP) {
7572                 /* Multiply by 2 */
7573                 min_free <<= 1;
7574         } else if (index == BTRFS_RAID_RAID0) {
7575                 dev_min = fs_devices->rw_devices;
7576                 do_div(min_free, dev_min);
7577         }
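             /*
              * e.g. RAID1 needs two devices that can each fit min_free
              * bytes, while DUP places both copies on one device and so
              * must fit twice min_free there; RAID0 and RAID10 spread the
              * data across stripes, shrinking the per-device requirement
              */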
7578
7579         mutex_lock(&root->fs_info->chunk_mutex);
7580         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7581                 u64 dev_offset;
7582
7583                 /*
7584                  * check to make sure we can actually find a chunk with enough
7585                  * space to fit our block group in.
7586                  */
7587                 if (device->total_bytes > device->bytes_used + min_free &&
7588                     !device->is_tgtdev_for_dev_replace) {
7589                         ret = find_free_dev_extent(device, min_free,
7590                                                    &dev_offset, NULL);
7591                         if (!ret)
7592                                 dev_nr++;
7593
7594                         if (dev_nr >= dev_min)
7595                                 break;
7596
7597                         ret = -1;
7598                 }
7599         }
7600         mutex_unlock(&root->fs_info->chunk_mutex);
7601 out:
7602         btrfs_put_block_group(block_group);
7603         return ret;
7604 }
7605
7606 static int find_first_block_group(struct btrfs_root *root,
7607                 struct btrfs_path *path, struct btrfs_key *key)
7608 {
7609         int ret = 0;
7610         struct btrfs_key found_key;
7611         struct extent_buffer *leaf;
7612         int slot;
7613
7614         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7615         if (ret < 0)
7616                 goto out;
7617
7618         while (1) {
7619                 slot = path->slots[0];
7620                 leaf = path->nodes[0];
7621                 if (slot >= btrfs_header_nritems(leaf)) {
7622                         ret = btrfs_next_leaf(root, path);
7623                         if (ret == 0)
7624                                 continue;
7625                         if (ret < 0)
7626                                 goto out;
7627                         break;
7628                 }
7629                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7630
7631                 if (found_key.objectid >= key->objectid &&
7632                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7633                         ret = 0;
7634                         goto out;
7635                 }
7636                 path->slots[0]++;
7637         }
7638 out:
7639         return ret;
7640 }
7641
7642 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7643 {
7644         struct btrfs_block_group_cache *block_group;
7645         u64 last = 0;
7646
7647         while (1) {
7648                 struct inode *inode;
7649
7650                 block_group = btrfs_lookup_first_block_group(info, last);
7651                 while (block_group) {
7652                         spin_lock(&block_group->lock);
7653                         if (block_group->iref)
7654                                 break;
7655                         spin_unlock(&block_group->lock);
7656                         block_group = next_block_group(info->tree_root,
7657                                                        block_group);
7658                 }
7659                 if (!block_group) {
7660                         if (last == 0)
7661                                 break;
7662                         last = 0;
7663                         continue;
7664                 }
7665
7666                 inode = block_group->inode;
7667                 block_group->iref = 0;
7668                 block_group->inode = NULL;
7669                 spin_unlock(&block_group->lock);
7670                 iput(inode);
7671                 last = block_group->key.objectid + block_group->key.offset;
7672                 btrfs_put_block_group(block_group);
7673         }
7674 }
7675
7676 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7677 {
7678         struct btrfs_block_group_cache *block_group;
7679         struct btrfs_space_info *space_info;
7680         struct btrfs_caching_control *caching_ctl;
7681         struct rb_node *n;
7682
7683         down_write(&info->extent_commit_sem);
7684         while (!list_empty(&info->caching_block_groups)) {
7685                 caching_ctl = list_entry(info->caching_block_groups.next,
7686                                          struct btrfs_caching_control, list);
7687                 list_del(&caching_ctl->list);
7688                 put_caching_control(caching_ctl);
7689         }
7690         up_write(&info->extent_commit_sem);
7691
7692         spin_lock(&info->block_group_cache_lock);
7693         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7694                 block_group = rb_entry(n, struct btrfs_block_group_cache,
7695                                        cache_node);
7696                 rb_erase(&block_group->cache_node,
7697                          &info->block_group_cache_tree);
7698                 spin_unlock(&info->block_group_cache_lock);
7699
7700                 down_write(&block_group->space_info->groups_sem);
7701                 list_del(&block_group->list);
7702                 up_write(&block_group->space_info->groups_sem);
7703
7704                 if (block_group->cached == BTRFS_CACHE_STARTED)
7705                         wait_block_group_cache_done(block_group);
7706
7707                 /*
7708                  * We haven't cached this block group, which means we could
7709                  * possibly have excluded extents on this block group.
7710                  */
7711                 if (block_group->cached == BTRFS_CACHE_NO)
7712                         free_excluded_extents(info->extent_root, block_group);
7713
7714                 btrfs_remove_free_space_cache(block_group);
7715                 btrfs_put_block_group(block_group);
7716
7717                 spin_lock(&info->block_group_cache_lock);
7718         }
7719         spin_unlock(&info->block_group_cache_lock);
7720
7721         /* now that all the block groups are freed, go through and
7722          * free all the space_info structs.  This is only called during
7723          * the final stages of unmount, and so we know nobody is
7724          * using them.  We call synchronize_rcu() once before we start,
7725          * just to be on the safe side.
7726          */
7727         synchronize_rcu();
7728
7729         release_global_block_rsv(info);
7730
7731         while (!list_empty(&info->space_info)) {
7732                 space_info = list_entry(info->space_info.next,
7733                                         struct btrfs_space_info,
7734                                         list);
7735                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
7736                         if (space_info->bytes_pinned > 0 ||
7737                             space_info->bytes_reserved > 0 ||
7738                             space_info->bytes_may_use > 0) {
7739                                 WARN_ON(1);
7740                                 dump_space_info(space_info, 0, 0);
7741                         }
7742                 }
7743                 list_del(&space_info->list);
7744                 kfree(space_info);
7745         }
7746         return 0;
7747 }
7748
7749 static void __link_block_group(struct btrfs_space_info *space_info,
7750                                struct btrfs_block_group_cache *cache)
7751 {
7752         int index = get_block_group_index(cache);
7753
7754         down_write(&space_info->groups_sem);
7755         list_add_tail(&cache->list, &space_info->block_groups[index]);
7756         up_write(&space_info->groups_sem);
7757 }
7758
7759 int btrfs_read_block_groups(struct btrfs_root *root)
7760 {
7761         struct btrfs_path *path;
7762         int ret;
7763         struct btrfs_block_group_cache *cache;
7764         struct btrfs_fs_info *info = root->fs_info;
7765         struct btrfs_space_info *space_info;
7766         struct btrfs_key key;
7767         struct btrfs_key found_key;
7768         struct extent_buffer *leaf;
7769         int need_clear = 0;
7770         u64 cache_gen;
7771
7772         root = info->extent_root;
7773         key.objectid = 0;
7774         key.offset = 0;
7775         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7776         path = btrfs_alloc_path();
7777         if (!path)
7778                 return -ENOMEM;
7779         path->reada = 1;
7780
7781         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
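             /*
              * if the cache generation recorded in the superblock doesn't
              * match the superblock generation itself, the on-disk free
              * space cache is stale and must be cleared and rebuilt
              */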
7782         if (btrfs_test_opt(root, SPACE_CACHE) &&
7783             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
7784                 need_clear = 1;
7785         if (btrfs_test_opt(root, CLEAR_CACHE))
7786                 need_clear = 1;
7787
7788         while (1) {
7789                 ret = find_first_block_group(root, path, &key);
7790                 if (ret > 0)
7791                         break;
7792                 if (ret != 0)
7793                         goto error;
7794                 leaf = path->nodes[0];
7795                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7796                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7797                 if (!cache) {
7798                         ret = -ENOMEM;
7799                         goto error;
7800                 }
7801                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7802                                                 GFP_NOFS);
7803                 if (!cache->free_space_ctl) {
7804                         kfree(cache);
7805                         ret = -ENOMEM;
7806                         goto error;
7807                 }
7808
7809                 atomic_set(&cache->count, 1);
7810                 spin_lock_init(&cache->lock);
7811                 cache->fs_info = info;
7812                 INIT_LIST_HEAD(&cache->list);
7813                 INIT_LIST_HEAD(&cache->cluster_list);
7814
7815                 if (need_clear) {
7816                         /*
7817                          * When we mount with an old space cache, we need
7818                          * to set BTRFS_DC_CLEAR and set the dirty flag.
7819                          *
7820                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
7821                          *    truncate the old free space cache inode and
7822                          *    set up a new one.
7823                          * b) Setting the 'dirty' flag makes sure that we
7824                          *    flush the new space cache info onto disk.
7825                          */
7826                         cache->disk_cache_state = BTRFS_DC_CLEAR;
7827                         if (btrfs_test_opt(root, SPACE_CACHE))
7828                                 cache->dirty = 1;
7829                 }
7830
7831                 read_extent_buffer(leaf, &cache->item,
7832                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
7833                                    sizeof(cache->item));
7834                 memcpy(&cache->key, &found_key, sizeof(found_key));
7835
7836                 key.objectid = found_key.objectid + found_key.offset;
7837                 btrfs_release_path(path);
7838                 cache->flags = btrfs_block_group_flags(&cache->item);
7839                 cache->sectorsize = root->sectorsize;
7840
7841                 btrfs_init_free_space_ctl(cache);
7842
7843                 /*
7844                  * We need to exclude the super stripes now so that the space
7845                  * info has super bytes accounted for, otherwise we'll think
7846                  * we have more space than we actually do.
7847                  */
7848                 exclude_super_stripes(root, cache);
7849
7850                 /*
7851                  * check for two cases, either we are full, and therefore
7852                  * don't need to bother with the caching work since we won't
7853                  * find any space, or we are empty, and we can just add all
7854                  * the space in and be done with it.  This saves us a lot of
7855                  * time, particularly in the full case.
7856                  */
7857                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7858                         cache->last_byte_to_unpin = (u64)-1;
7859                         cache->cached = BTRFS_CACHE_FINISHED;
7860                         free_excluded_extents(root, cache);
7861                 } else if (btrfs_block_group_used(&cache->item) == 0) {
7862                         cache->last_byte_to_unpin = (u64)-1;
7863                         cache->cached = BTRFS_CACHE_FINISHED;
7864                         add_new_free_space(cache, root->fs_info,
7865                                            found_key.objectid,
7866                                            found_key.objectid +
7867                                            found_key.offset);
7868                         free_excluded_extents(root, cache);
7869                 }
7870
7871                 ret = update_space_info(info, cache->flags, found_key.offset,
7872                                         btrfs_block_group_used(&cache->item),
7873                                         &space_info);
7874                 BUG_ON(ret); /* -ENOMEM */
7875                 cache->space_info = space_info;
7876                 spin_lock(&cache->space_info->lock);
7877                 cache->space_info->bytes_readonly += cache->bytes_super;
7878                 spin_unlock(&cache->space_info->lock);
7879
7880                 __link_block_group(space_info, cache);
7881
7882                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7883                 BUG_ON(ret); /* Logic error */
7884
7885                 set_avail_alloc_bits(root->fs_info, cache->flags);
7886                 if (btrfs_chunk_readonly(root, cache->key.objectid))
7887                         set_block_group_ro(cache, 1);
7888         }
7889
7890         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7891                 if (!(get_alloc_profile(root, space_info->flags) &
7892                       (BTRFS_BLOCK_GROUP_RAID10 |
7893                        BTRFS_BLOCK_GROUP_RAID1 |
7894                        BTRFS_BLOCK_GROUP_DUP)))
7895                         continue;
7896                 /*
7897                  * avoid allocating from un-mirrored block groups if there
7898                  * are mirrored block groups.
7899                  */
7900                 list_for_each_entry(cache, &space_info->block_groups[3], list)
7901                         set_block_group_ro(cache, 1);
7902                 list_for_each_entry(cache, &space_info->block_groups[4], list)
7903                         set_block_group_ro(cache, 1);
7904         }
7905
7906         init_global_block_rsv(info);
7907         ret = 0;
7908 error:
7909         btrfs_free_path(path);
7910         return ret;
7911 }
7912
7913 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
7914                                        struct btrfs_root *root)
7915 {
7916         struct btrfs_block_group_cache *block_group, *tmp;
7917         struct btrfs_root *extent_root = root->fs_info->extent_root;
7918         struct btrfs_block_group_item item;
7919         struct btrfs_key key;
7920         int ret = 0;
7921
7922         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
7923                                  new_bg_list) {
7924                 list_del_init(&block_group->new_bg_list);
7925
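                     /*
                      * once one insertion fails (aborting the transaction),
                      * keep walking the list only to unlink the remaining
                      * block groups from new_bgs
                      */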
7926                 if (ret)
7927                         continue;
7928
7929                 spin_lock(&block_group->lock);
7930                 memcpy(&item, &block_group->item, sizeof(item));
7931                 memcpy(&key, &block_group->key, sizeof(key));
7932                 spin_unlock(&block_group->lock);
7933
7934                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
7935                                         sizeof(item));
7936                 if (ret)
7937                         btrfs_abort_transaction(trans, extent_root, ret);
7938         }
7939 }
7940
7941 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7942                            struct btrfs_root *root, u64 bytes_used,
7943                            u64 type, u64 chunk_objectid, u64 chunk_offset,
7944                            u64 size)
7945 {
7946         int ret;
7947         struct btrfs_root *extent_root;
7948         struct btrfs_block_group_cache *cache;
7949
7950         extent_root = root->fs_info->extent_root;
7951
7952         root->fs_info->last_trans_log_full_commit = trans->transid;
7953
7954         cache = kzalloc(sizeof(*cache), GFP_NOFS);
7955         if (!cache)
7956                 return -ENOMEM;
7957         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7958                                         GFP_NOFS);
7959         if (!cache->free_space_ctl) {
7960                 kfree(cache);
7961                 return -ENOMEM;
7962         }
7963
7964         cache->key.objectid = chunk_offset;
7965         cache->key.offset = size;
7966         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7967         cache->sectorsize = root->sectorsize;
7968         cache->fs_info = root->fs_info;
7969
7970         atomic_set(&cache->count, 1);
7971         spin_lock_init(&cache->lock);
7972         INIT_LIST_HEAD(&cache->list);
7973         INIT_LIST_HEAD(&cache->cluster_list);
7974         INIT_LIST_HEAD(&cache->new_bg_list);
7975
7976         btrfs_init_free_space_ctl(cache);
7977
7978         btrfs_set_block_group_used(&cache->item, bytes_used);
7979         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7980         cache->flags = type;
7981         btrfs_set_block_group_flags(&cache->item, type);
7982
7983         cache->last_byte_to_unpin = (u64)-1;
7984         cache->cached = BTRFS_CACHE_FINISHED;
7985         exclude_super_stripes(root, cache);
7986
7987         add_new_free_space(cache, root->fs_info, chunk_offset,
7988                            chunk_offset + size);
7989
7990         free_excluded_extents(root, cache);
7991
7992         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7993                                 &cache->space_info);
7994         BUG_ON(ret); /* -ENOMEM */
7995         update_global_block_rsv(root->fs_info);
7996
7997         spin_lock(&cache->space_info->lock);
7998         cache->space_info->bytes_readonly += cache->bytes_super;
7999         spin_unlock(&cache->space_info->lock);
8000
8001         __link_block_group(cache->space_info, cache);
8002
8003         ret = btrfs_add_block_group_cache(root->fs_info, cache);
8004         BUG_ON(ret); /* Logic error */
8005
8006         list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8007
8008         set_avail_alloc_bits(extent_root->fs_info, type);
8009
8010         return 0;
8011 }
8012
8013 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8014 {
8015         u64 extra_flags = chunk_to_extended(flags) &
8016                                 BTRFS_EXTENDED_PROFILE_MASK;
8017
8018         write_seqlock(&fs_info->profiles_lock);
8019         if (flags & BTRFS_BLOCK_GROUP_DATA)
8020                 fs_info->avail_data_alloc_bits &= ~extra_flags;
8021         if (flags & BTRFS_BLOCK_GROUP_METADATA)
8022                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8023         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8024                 fs_info->avail_system_alloc_bits &= ~extra_flags;
8025         write_sequnlock(&fs_info->profiles_lock);
8026 }
8027
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 group_start)
{
        struct btrfs_path *path;
        struct btrfs_block_group_cache *block_group;
        struct btrfs_free_cluster *cluster;
        struct btrfs_root *tree_root = root->fs_info->tree_root;
        struct btrfs_key key;
        struct inode *inode;
        int ret;
        int index;
        int factor;

        root = root->fs_info->extent_root;

        block_group = btrfs_lookup_block_group(root->fs_info, group_start);
        BUG_ON(!block_group);
        BUG_ON(!block_group->ro);

        /*
         * Free the reserved super bytes from this block group before
         * removing it.
         */
        free_excluded_extents(root, block_group);

        memcpy(&key, &block_group->key, sizeof(key));
        index = get_block_group_index(block_group);
        if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
                                  BTRFS_BLOCK_GROUP_RAID1 |
                                  BTRFS_BLOCK_GROUP_RAID10))
                factor = 2;
        else
                factor = 1;

        /* make sure this block group isn't part of an allocation cluster */
        cluster = &root->fs_info->data_alloc_cluster;
        spin_lock(&cluster->refill_lock);
        btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&cluster->refill_lock);

        /*
         * make sure this block group isn't part of a metadata
         * allocation cluster
         */
        cluster = &root->fs_info->meta_alloc_cluster;
        spin_lock(&cluster->refill_lock);
        btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&cluster->refill_lock);

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        inode = lookup_free_space_inode(tree_root, block_group, path);
        if (!IS_ERR(inode)) {
                ret = btrfs_orphan_add(trans, inode);
                if (ret) {
                        btrfs_add_delayed_iput(inode);
                        goto out;
                }
                clear_nlink(inode);
                /* One for the block group's ref */
                spin_lock(&block_group->lock);
                if (block_group->iref) {
                        block_group->iref = 0;
                        block_group->inode = NULL;
                        spin_unlock(&block_group->lock);
                        iput(inode);
                } else {
                        spin_unlock(&block_group->lock);
                }
                /* One for our lookup ref */
                btrfs_add_delayed_iput(inode);
        }

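        /*
         * Delete the free space cache item that points at this block
         * group's cache inode; its key in the tree root is
         * (BTRFS_FREE_SPACE_OBJECTID, 0, <block group objectid>).
         */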
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = block_group->key.objectid;
        key.type = 0;

        ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0)
                btrfs_release_path(path);
        if (ret == 0) {
                ret = btrfs_del_item(trans, tree_root, path);
                if (ret)
                        goto out;
                btrfs_release_path(path);
        }

        spin_lock(&root->fs_info->block_group_cache_lock);
        rb_erase(&block_group->cache_node,
                 &root->fs_info->block_group_cache_tree);

        if (root->fs_info->first_logical_byte == block_group->key.objectid)
                root->fs_info->first_logical_byte = (u64)-1;
        spin_unlock(&root->fs_info->block_group_cache_lock);

        down_write(&block_group->space_info->groups_sem);
        /*
         * we must use list_del_init so people can check to see if they
         * are still on the list after taking the semaphore
         */
        list_del_init(&block_group->list);
        if (list_empty(&block_group->space_info->block_groups[index]))
                clear_avail_alloc_bits(root->fs_info, block_group->flags);
        up_write(&block_group->space_info->groups_sem);

        if (block_group->cached == BTRFS_CACHE_STARTED)
                wait_block_group_cache_done(block_group);

        btrfs_remove_free_space_cache(block_group);

        spin_lock(&block_group->space_info->lock);
        block_group->space_info->total_bytes -= block_group->key.offset;
        block_group->space_info->bytes_readonly -= block_group->key.offset;
        block_group->space_info->disk_total -= block_group->key.offset * factor;
        spin_unlock(&block_group->space_info->lock);

        memcpy(&key, &block_group->key, sizeof(key));

        btrfs_clear_space_info_full(root->fs_info);

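        /*
         * Drop two references: the one taken by btrfs_lookup_block_group()
         * above and the one originally held for the block group cache
         * rbtree, which we erased from earlier.
         */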
        btrfs_put_block_group(block_group);
        btrfs_put_block_group(block_group);

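        /*
         * Finally delete the block group item itself.  A return of > 0
         * from btrfs_search_slot() means the item is missing, which
         * should never happen here, so treat it as -EIO.
         */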
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -EIO;
        if (ret < 0)
                goto out;

        ret = btrfs_del_item(trans, root, path);
out:
        btrfs_free_path(path);
        return ret;
}

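/*
 * Seed the space_info list at mount time: SYSTEM plus either one mixed
 * DATA|METADATA entry or separate METADATA and DATA entries, depending
 * on whether the MIXED_GROUPS incompat feature is set.
 */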
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
        struct btrfs_space_info *space_info;
        struct btrfs_super_block *disk_super;
        u64 features;
        u64 flags;
        int mixed = 0;
        int ret;

        disk_super = fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                return 1;

        features = btrfs_super_incompat_flags(disk_super);
        if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
                mixed = 1;

        flags = BTRFS_BLOCK_GROUP_SYSTEM;
        ret = update_space_info(fs_info, flags, 0, 0, &space_info);
        if (ret)
                goto out;

        if (mixed) {
                flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
                ret = update_space_info(fs_info, flags, 0, 0, &space_info);
        } else {
                flags = BTRFS_BLOCK_GROUP_METADATA;
                ret = update_space_info(fs_info, flags, 0, 0, &space_info);
                if (ret)
                        goto out;

                flags = BTRFS_BLOCK_GROUP_DATA;
                ret = update_space_info(fs_info, flags, 0, 0, &space_info);
        }
out:
        return ret;
}

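/*
 * Thin wrappers around unpin_extent_range() and btrfs_discard_extent()
 * for use by the error handling / transaction abort cleanup code.
 */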
int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
        return unpin_extent_range(root, start, end);
}

int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
                               u64 num_bytes, u64 *actual_bytes)
{
        return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
}

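/*
 * Back end of the FITRIM ioctl: walk every block group overlapping
 * [range->start, range->start + range->len), discard the free space in
 * each, honouring range->minlen, and return the total number of bytes
 * discarded in range->len.
 */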
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_group_cache *cache = NULL;
        u64 group_trimmed;
        u64 start;
        u64 end;
        u64 trimmed = 0;
        u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
        int ret = 0;

        /*
         * When trimming the whole FS, start from the first block group:
         * block groups may begin at a non-zero offset, so an exact
         * lookup of range->start could come up empty.
         */
        if (range->len == total_bytes)
                cache = btrfs_lookup_first_block_group(fs_info, range->start);
        else
                cache = btrfs_lookup_block_group(fs_info, range->start);

        while (cache) {
                if (cache->key.objectid >= (range->start + range->len)) {
                        btrfs_put_block_group(cache);
                        break;
                }

                start = max(range->start, cache->key.objectid);
                end = min(range->start + range->len,
                                cache->key.objectid + cache->key.offset);

                if (end - start >= range->minlen) {
                        if (!block_group_cache_done(cache)) {
                                ret = cache_block_group(cache, 0);
                                if (!ret)
                                        wait_block_group_cache_done(cache);
                        }
                        ret = btrfs_trim_block_group(cache,
                                                     &group_trimmed,
                                                     start,
                                                     end,
                                                     range->minlen);

                        trimmed += group_trimmed;
                        if (ret) {
                                btrfs_put_block_group(cache);
                                break;
                        }
                }

                cache = next_block_group(fs_info->tree_root, cache);
        }

        range->len = trimmed;
        return ret;
}
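
/*
 * Usage sketch (userspace, not part of this file): btrfs_trim_fs() is
 * reached through the generic FITRIM ioctl; the btrfs ioctl handler
 * clamps range->len to the filesystem size before calling in here.
 *
 *        struct fstrim_range range = {
 *                .start  = 0,
 *                .len    = ULLONG_MAX,
 *                .minlen = 0,
 *        };
 *        int fd = open("/mnt", O_RDONLY);
 *
 *        if (fd >= 0 && ioctl(fd, FITRIM, &range) == 0)
 *                printf("%llu bytes trimmed\n",
 *                       (unsigned long long)range.len);
 *
 * On success, range.len comes back holding the number of bytes actually
 * trimmed, as set at the end of btrfs_trim_fs() above.
 */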