2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
26 #include "print-tree.h"
27 #include "transaction.h"
30 #include "ref-cache.h"
32 #define PENDING_EXTENT_INSERT 0
33 #define PENDING_EXTENT_DELETE 1
34 #define PENDING_BACKREF_UPDATE 2
36 struct pending_extent_op {
47 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
48 btrfs_root *extent_root, int all);
49 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
50 btrfs_root *extent_root, int all);
51 static struct btrfs_block_group_cache *
52 __btrfs_find_block_group(struct btrfs_root *root,
53 struct btrfs_block_group_cache *hint,
54 u64 search_start, int data, int owner);
56 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
58 return (cache->flags & bits) == bits;
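/*
 * Illustrative note: block_group_bits() checks that every requested bit is
 * set, so e.g. a block group flagged (BTRFS_BLOCK_GROUP_DATA |
 * BTRFS_BLOCK_GROUP_RAID1) satisfies a caller asking only for
 * BTRFS_BLOCK_GROUP_DATA.
 */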
62 * this adds the block group to the fs_info rb tree for the block group cache
65 int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
66 struct btrfs_block_group_cache *block_group)
69 struct rb_node *parent = NULL;
70 struct btrfs_block_group_cache *cache;
72 spin_lock(&info->block_group_cache_lock);
73 p = &info->block_group_cache_tree.rb_node;
77 cache = rb_entry(parent, struct btrfs_block_group_cache,
79 if (block_group->key.objectid < cache->key.objectid) {
81 } else if (block_group->key.objectid > cache->key.objectid) {
84 spin_unlock(&info->block_group_cache_lock);
89 rb_link_node(&block_group->cache_node, parent, p);
90 rb_insert_color(&block_group->cache_node,
91 &info->block_group_cache_tree);
92 spin_unlock(&info->block_group_cache_lock);
98 * This will return the block group at or after bytenr if contains is 0, else
99 * it will return the block group that contains the bytenr
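 *
 * For illustration: the wrappers further down map onto this flag;
 * btrfs_lookup_first_block_group() calls this with contains == 0 and
 * btrfs_lookup_block_group() calls it with contains == 1.
 */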
101 static struct btrfs_block_group_cache *
102 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
105 struct btrfs_block_group_cache *cache, *ret = NULL;
109 spin_lock(&info->block_group_cache_lock);
110 n = info->block_group_cache_tree.rb_node;
113 cache = rb_entry(n, struct btrfs_block_group_cache,
115 end = cache->key.objectid + cache->key.offset - 1;
116 start = cache->key.objectid;
118 if (bytenr < start) {
119 if (!contains && (!ret || start < ret->key.objectid))
122 } else if (bytenr > start) {
123 if (contains && bytenr <= end) {
133 spin_unlock(&info->block_group_cache_lock);
139 * this is only called by cache_block_group.  Since we could have freed extents,
140 * we need to check the pinned_extents for any extents that can't be used yet
141 * because their free space won't be released until the transaction commits.
143 static int add_new_free_space(struct btrfs_block_group_cache *block_group,
144 struct btrfs_fs_info *info, u64 start, u64 end)
146 u64 extent_start, extent_end, size;
149 mutex_lock(&info->pinned_mutex);
150 while (start < end) {
151 ret = find_first_extent_bit(&info->pinned_extents, start,
152 &extent_start, &extent_end,
157 if (extent_start == start) {
158 start = extent_end + 1;
159 } else if (extent_start > start && extent_start < end) {
160 size = extent_start - start;
161 ret = btrfs_add_free_space_lock(block_group, start,
164 start = extent_end + 1;
172 ret = btrfs_add_free_space_lock(block_group, start, size);
175 mutex_unlock(&info->pinned_mutex);
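/*
 * Worked example (byte numbers made up): for a block group covering
 * [0, 100) with a single pinned extent at [40, 60), the loop above adds a
 * free space entry for [0, 40), skips the pinned range, and the final
 * btrfs_add_free_space_lock() call covers [60, 100).
 */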
180 static int cache_block_group(struct btrfs_root *root,
181 struct btrfs_block_group_cache *block_group)
183 struct btrfs_path *path;
185 struct btrfs_key key;
186 struct extent_buffer *leaf;
195 root = root->fs_info->extent_root;
197 if (block_group->cached)
200 path = btrfs_alloc_path();
206 * we get into deadlocks with paths held by callers of this function.
207 * since the alloc_mutex is protecting things right now, just
208 * skip the locking here
210 path->skip_locking = 1;
211 first_free = max_t(u64, block_group->key.objectid,
212 BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
213 key.objectid = block_group->key.objectid;
215 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
216 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
219 ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
223 leaf = path->nodes[0];
224 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
225 if (key.objectid + key.offset > first_free)
226 first_free = key.objectid + key.offset;
229 leaf = path->nodes[0];
230 slot = path->slots[0];
231 if (slot >= btrfs_header_nritems(leaf)) {
232 ret = btrfs_next_leaf(root, path);
240 btrfs_item_key_to_cpu(leaf, &key, slot);
241 if (key.objectid < block_group->key.objectid)
244 if (key.objectid >= block_group->key.objectid +
245 block_group->key.offset)
248 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
254 add_new_free_space(block_group, root->fs_info, last,
257 last = key.objectid + key.offset;
266 add_new_free_space(block_group, root->fs_info, last,
267 block_group->key.objectid +
268 block_group->key.offset);
270 block_group->cached = 1;
273 btrfs_free_path(path);
278 * return the block group that starts at or after bytenr
280 struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
284 struct btrfs_block_group_cache *cache;
286 cache = block_group_cache_tree_search(info, bytenr, 0);
292 * return the block group that contains the given bytenr
294 struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
298 struct btrfs_block_group_cache *cache;
300 cache = block_group_cache_tree_search(info, bytenr, 1);
305 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
308 struct list_head *head = &info->space_info;
309 struct list_head *cur;
310 struct btrfs_space_info *found;
311 list_for_each(cur, head) {
312 found = list_entry(cur, struct btrfs_space_info, list);
313 if (found->flags == flags)
319 static u64 div_factor(u64 num, int factor)
328 static struct btrfs_block_group_cache *
329 __btrfs_find_block_group(struct btrfs_root *root,
330 struct btrfs_block_group_cache *hint,
331 u64 search_start, int data, int owner)
333 struct btrfs_block_group_cache *cache;
334 struct btrfs_block_group_cache *found_group = NULL;
335 struct btrfs_fs_info *info = root->fs_info;
343 if (data & BTRFS_BLOCK_GROUP_METADATA)
347 struct btrfs_block_group_cache *shint;
348 shint = btrfs_lookup_first_block_group(info, search_start);
349 if (shint && block_group_bits(shint, data) && !shint->ro) {
350 spin_lock(&shint->lock);
351 used = btrfs_block_group_used(&shint->item);
352 if (used + shint->pinned + shint->reserved <
353 div_factor(shint->key.offset, factor)) {
354 spin_unlock(&shint->lock);
357 spin_unlock(&shint->lock);
360 if (hint && !hint->ro && block_group_bits(hint, data)) {
361 spin_lock(&hint->lock);
362 used = btrfs_block_group_used(&hint->item);
363 if (used + hint->pinned + hint->reserved <
364 div_factor(hint->key.offset, factor)) {
365 spin_unlock(&hint->lock);
368 spin_unlock(&hint->lock);
369 last = hint->key.objectid + hint->key.offset;
372 last = max(hint->key.objectid, search_start);
378 cache = btrfs_lookup_first_block_group(root->fs_info, last);
382 spin_lock(&cache->lock);
383 last = cache->key.objectid + cache->key.offset;
384 used = btrfs_block_group_used(&cache->item);
386 if (!cache->ro && block_group_bits(cache, data)) {
387 free_check = div_factor(cache->key.offset, factor);
388 if (used + cache->pinned + cache->reserved <
391 spin_unlock(&cache->lock);
395 spin_unlock(&cache->lock);
403 if (!full_search && factor < 10) {
413 struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
414 struct btrfs_block_group_cache
415 *hint, u64 search_start,
419 struct btrfs_block_group_cache *ret;
420 ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
424 /* simple helper to search for an existing extent at a given offset */
425 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
428 struct btrfs_key key;
429 struct btrfs_path *path;
431 path = btrfs_alloc_path();
433 key.objectid = start;
435 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
436 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
438 btrfs_free_path(path);
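/*
 * Usage sketch (illustrative only; 'root', 'start' and 'len' are whatever
 * the caller has at hand): the return value above comes straight from
 * btrfs_search_slot(), so
 *
 *	err = btrfs_lookup_extent(root, start, len);
 *
 * yields 0 when an extent item exactly matching (start, EXTENT_ITEM, len)
 * exists, a positive value when it does not, and a negative errno on error.
 */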
443 * Back reference rules. Back refs have three main goals:
445 * 1) Differentiate between all holders of references to an extent so that
446 * when a reference is dropped we can make sure it was a valid reference
447 * before freeing the extent.
449 * 2) Provide enough information to quickly find the holders of an extent
450 * if we notice a given block is corrupted or bad.
452 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
453 * maintenance. This is actually the same as #2, but with a slightly
454 * different use case.
456 * File extents can be referenced by:
458 * - multiple snapshots, subvolumes, or different generations in one subvol
459 * - different files inside a single subvolume
460 * - different offsets inside a file (bookend extents in file.c)
462 * The extent ref structure has fields for:
464 * - Objectid of the subvolume root
465 * - Generation number of the tree holding the reference
466 * - objectid of the file holding the reference
467 * - number of references held by the parent node (always 1 for tree blocks)
469 * A btree leaf may hold multiple references to a file extent. In most
470 * cases, these references are from the same file and the corresponding
471 * offsets inside the file are close together.
473 * When a file extent is allocated the fields are filled in:
474 * (root_key.objectid, trans->transid, inode objectid, 1)
476 * When a leaf is cow'd new references are added for every file extent found
477 * in the leaf. It looks similar to the create case, but trans->transid will
478 * be different when the block is cow'd.
480 * (root_key.objectid, trans->transid, inode objectid,
481 * number of references in the leaf)
483 * When a file extent is removed either during snapshot deletion or
484 * file truncation, we find the corresponding back reference and check
485 * the following fields:
487 * (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
490 * Btree extents can be referenced by:
492 * - Different subvolumes
493 * - Different generations of the same subvolume
495 * When a tree block is created, back references are inserted:
497 * (root->root_key.objectid, trans->transid, level, 1)
499 * When a tree block is cow'd, new back references are added for all the
500 * blocks it points to. If the tree block isn't in a reference counted root,
501 * the old back references are removed. These new back references are of
502 * the form (trans->transid will have increased since creation):
504 * (root->root_key.objectid, trans->transid, level, 1)
506 * When a backref is being deleted, the following fields are checked:
508 * if the backref was for a tree root:
509 * (btrfs_header_owner(itself), btrfs_header_generation(itself), level)
511 * (btrfs_header_owner(parent), btrfs_header_generation(parent), level)
513 * Back reference key composition:
515 * The key objectid corresponds to the first byte in the extent, the key
516 * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first
517 * byte of the parent extent. If an extent is a tree root, the key offset is set
518 * to the key objectid.
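 *
 * As a made-up example: a back reference for an extent starting at byte
 * 8192 whose parent tree block starts at byte 4096 would be keyed as
 *
 *	key.objectid = 8192;
 *	key.type = BTRFS_EXTENT_REF_KEY;
 *	key.offset = 4096;
 *
 * which mirrors the key setup in lookup_extent_backref() and
 * insert_extent_backref() below.
 */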
521 static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
522 struct btrfs_root *root,
523 struct btrfs_path *path,
524 u64 bytenr, u64 parent,
525 u64 ref_root, u64 ref_generation,
526 u64 owner_objectid, int del)
528 struct btrfs_key key;
529 struct btrfs_extent_ref *ref;
530 struct extent_buffer *leaf;
534 key.objectid = bytenr;
535 key.type = BTRFS_EXTENT_REF_KEY;
538 ret = btrfs_search_slot(trans, root, &key, path, del ? -1 : 0, 1);
546 leaf = path->nodes[0];
547 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
548 ref_objectid = btrfs_ref_objectid(leaf, ref);
549 if (btrfs_ref_root(leaf, ref) != ref_root ||
550 btrfs_ref_generation(leaf, ref) != ref_generation ||
551 (ref_objectid != owner_objectid &&
552 ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
562 static int noinline insert_extent_backref(struct btrfs_trans_handle *trans,
563 struct btrfs_root *root,
564 struct btrfs_path *path,
565 u64 bytenr, u64 parent,
566 u64 ref_root, u64 ref_generation,
569 struct btrfs_key key;
570 struct extent_buffer *leaf;
571 struct btrfs_extent_ref *ref;
575 key.objectid = bytenr;
576 key.type = BTRFS_EXTENT_REF_KEY;
579 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*ref));
581 leaf = path->nodes[0];
582 ref = btrfs_item_ptr(leaf, path->slots[0],
583 struct btrfs_extent_ref);
584 btrfs_set_ref_root(leaf, ref, ref_root);
585 btrfs_set_ref_generation(leaf, ref, ref_generation);
586 btrfs_set_ref_objectid(leaf, ref, owner_objectid);
587 btrfs_set_ref_num_refs(leaf, ref, 1);
588 } else if (ret == -EEXIST) {
590 BUG_ON(owner_objectid < BTRFS_FIRST_FREE_OBJECTID);
591 leaf = path->nodes[0];
592 ref = btrfs_item_ptr(leaf, path->slots[0],
593 struct btrfs_extent_ref);
594 if (btrfs_ref_root(leaf, ref) != ref_root ||
595 btrfs_ref_generation(leaf, ref) != ref_generation) {
601 num_refs = btrfs_ref_num_refs(leaf, ref);
602 BUG_ON(num_refs == 0);
603 btrfs_set_ref_num_refs(leaf, ref, num_refs + 1);
605 existing_owner = btrfs_ref_objectid(leaf, ref);
606 if (existing_owner != owner_objectid &&
607 existing_owner != BTRFS_MULTIPLE_OBJECTIDS) {
608 btrfs_set_ref_objectid(leaf, ref,
609 BTRFS_MULTIPLE_OBJECTIDS);
615 btrfs_mark_buffer_dirty(path->nodes[0]);
617 btrfs_release_path(root, path);
621 static int noinline remove_extent_backref(struct btrfs_trans_handle *trans,
622 struct btrfs_root *root,
623 struct btrfs_path *path)
625 struct extent_buffer *leaf;
626 struct btrfs_extent_ref *ref;
630 leaf = path->nodes[0];
631 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
632 num_refs = btrfs_ref_num_refs(leaf, ref);
633 BUG_ON(num_refs == 0);
636 ret = btrfs_del_item(trans, root, path);
638 btrfs_set_ref_num_refs(leaf, ref, num_refs);
639 btrfs_mark_buffer_dirty(leaf);
641 btrfs_release_path(root, path);
645 static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
646 struct btrfs_root *root, u64 bytenr,
647 u64 orig_parent, u64 parent,
648 u64 orig_root, u64 ref_root,
649 u64 orig_generation, u64 ref_generation,
653 struct btrfs_root *extent_root = root->fs_info->extent_root;
654 struct btrfs_path *path;
656 if (root == root->fs_info->extent_root) {
657 struct pending_extent_op *extent_op;
660 BUG_ON(owner_objectid >= BTRFS_MAX_LEVEL);
661 num_bytes = btrfs_level_size(root, (int)owner_objectid);
662 mutex_lock(&root->fs_info->extent_ins_mutex);
663 if (test_range_bit(&root->fs_info->extent_ins, bytenr,
664 bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
666 ret = get_state_private(&root->fs_info->extent_ins,
669 extent_op = (struct pending_extent_op *)
671 BUG_ON(extent_op->parent != orig_parent);
672 BUG_ON(extent_op->generation != orig_generation);
674 extent_op->parent = parent;
675 extent_op->generation = ref_generation;
677 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
680 extent_op->type = PENDING_BACKREF_UPDATE;
681 extent_op->bytenr = bytenr;
682 extent_op->num_bytes = num_bytes;
683 extent_op->parent = parent;
684 extent_op->orig_parent = orig_parent;
685 extent_op->generation = ref_generation;
686 extent_op->orig_generation = orig_generation;
687 extent_op->level = (int)owner_objectid;
689 set_extent_bits(&root->fs_info->extent_ins,
690 bytenr, bytenr + num_bytes - 1,
691 EXTENT_WRITEBACK, GFP_NOFS);
692 set_state_private(&root->fs_info->extent_ins,
693 bytenr, (unsigned long)extent_op);
695 mutex_unlock(&root->fs_info->extent_ins_mutex);
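/*
 * For every other root, the back reference is updated in place below:
 * look up the old (orig_*) backref, delete it, and insert a fresh one
 * for the new parent and generation.
 */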
699 path = btrfs_alloc_path();
702 ret = lookup_extent_backref(trans, extent_root, path,
703 bytenr, orig_parent, orig_root,
704 orig_generation, owner_objectid, 1);
707 ret = remove_extent_backref(trans, extent_root, path);
710 ret = insert_extent_backref(trans, extent_root, path, bytenr,
711 parent, ref_root, ref_generation,
714 finish_current_insert(trans, extent_root, 0);
715 del_pending_extents(trans, extent_root, 0);
717 btrfs_free_path(path);
721 int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
722 struct btrfs_root *root, u64 bytenr,
723 u64 orig_parent, u64 parent,
724 u64 ref_root, u64 ref_generation,
728 if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
729 owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
731 ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_parent,
732 parent, ref_root, ref_root,
733 ref_generation, ref_generation,
738 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
739 struct btrfs_root *root, u64 bytenr,
740 u64 orig_parent, u64 parent,
741 u64 orig_root, u64 ref_root,
742 u64 orig_generation, u64 ref_generation,
745 struct btrfs_path *path;
747 struct btrfs_key key;
748 struct extent_buffer *l;
749 struct btrfs_extent_item *item;
752 path = btrfs_alloc_path();
757 key.objectid = bytenr;
758 key.type = BTRFS_EXTENT_ITEM_KEY;
759 key.offset = (u64)-1;
761 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
765 BUG_ON(ret == 0 || path->slots[0] == 0);
770 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
771 BUG_ON(key.objectid != bytenr);
772 BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);
774 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
775 refs = btrfs_extent_refs(l, item);
776 btrfs_set_extent_refs(l, item, refs + 1);
777 btrfs_mark_buffer_dirty(path->nodes[0]);
779 btrfs_release_path(root->fs_info->extent_root, path);
782 ret = insert_extent_backref(trans, root->fs_info->extent_root,
783 path, bytenr, parent,
784 ref_root, ref_generation,
787 finish_current_insert(trans, root->fs_info->extent_root, 0);
788 del_pending_extents(trans, root->fs_info->extent_root, 0);
790 btrfs_free_path(path);
794 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
795 struct btrfs_root *root,
796 u64 bytenr, u64 num_bytes, u64 parent,
797 u64 ref_root, u64 ref_generation,
801 if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
802 owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
804 ret = __btrfs_inc_extent_ref(trans, root, bytenr, 0, parent,
805 0, ref_root, 0, ref_generation,
810 int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
811 struct btrfs_root *root)
813 finish_current_insert(trans, root->fs_info->extent_root, 1);
814 del_pending_extents(trans, root->fs_info->extent_root, 1);
818 int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
819 struct btrfs_root *root, u64 bytenr,
820 u64 num_bytes, u32 *refs)
822 struct btrfs_path *path;
824 struct btrfs_key key;
825 struct extent_buffer *l;
826 struct btrfs_extent_item *item;
828 WARN_ON(num_bytes < root->sectorsize);
829 path = btrfs_alloc_path();
831 key.objectid = bytenr;
832 key.offset = num_bytes;
833 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
834 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
839 btrfs_print_leaf(root, path->nodes[0]);
840 printk("failed to find block number %Lu\n", bytenr);
844 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
845 *refs = btrfs_extent_refs(l, item);
847 btrfs_free_path(path);
851 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
852 struct btrfs_root *root, u64 bytenr)
854 struct btrfs_root *extent_root = root->fs_info->extent_root;
855 struct btrfs_path *path;
856 struct extent_buffer *leaf;
857 struct btrfs_extent_ref *ref_item;
858 struct btrfs_key key;
859 struct btrfs_key found_key;
865 key.objectid = bytenr;
866 key.offset = (u64)-1;
867 key.type = BTRFS_EXTENT_ITEM_KEY;
869 path = btrfs_alloc_path();
870 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
876 if (path->slots[0] == 0)
880 leaf = path->nodes[0];
881 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
883 if (found_key.objectid != bytenr ||
884 found_key.type != BTRFS_EXTENT_ITEM_KEY)
887 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
889 leaf = path->nodes[0];
890 nritems = btrfs_header_nritems(leaf);
891 if (path->slots[0] >= nritems) {
892 ret = btrfs_next_leaf(extent_root, path);
899 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
900 if (found_key.objectid != bytenr)
903 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
908 ref_item = btrfs_item_ptr(leaf, path->slots[0],
909 struct btrfs_extent_ref);
910 ref_root = btrfs_ref_root(leaf, ref_item);
911 if (ref_root != root->root_key.objectid &&
912 ref_root != BTRFS_TREE_LOG_OBJECTID) {
916 if (btrfs_ref_generation(leaf, ref_item) <= last_snapshot) {
925 btrfs_free_path(path);
929 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
930 struct extent_buffer *buf, u32 nr_extents)
932 struct btrfs_key key;
933 struct btrfs_file_extent_item *fi;
944 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
946 root_gen = root->root_key.offset;
949 root_gen = trans->transid - 1;
952 level = btrfs_header_level(buf);
953 nritems = btrfs_header_nritems(buf);
956 struct btrfs_leaf_ref *ref;
957 struct btrfs_extent_info *info;
959 ref = btrfs_alloc_leaf_ref(root, nr_extents);
965 ref->root_gen = root_gen;
966 ref->bytenr = buf->start;
967 ref->owner = btrfs_header_owner(buf);
968 ref->generation = btrfs_header_generation(buf);
969 ref->nritems = nr_extents;
972 for (i = 0; nr_extents > 0 && i < nritems; i++) {
974 btrfs_item_key_to_cpu(buf, &key, i);
975 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
977 fi = btrfs_item_ptr(buf, i,
978 struct btrfs_file_extent_item);
979 if (btrfs_file_extent_type(buf, fi) ==
980 BTRFS_FILE_EXTENT_INLINE)
982 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
983 if (disk_bytenr == 0)
986 info->bytenr = disk_bytenr;
988 btrfs_file_extent_disk_num_bytes(buf, fi);
989 info->objectid = key.objectid;
990 info->offset = key.offset;
994 ret = btrfs_add_leaf_ref(root, ref, shared);
995 if (ret == -EEXIST && shared) {
996 struct btrfs_leaf_ref *old;
997 old = btrfs_lookup_leaf_ref(root, ref->bytenr);
999 btrfs_remove_leaf_ref(root, old);
1000 btrfs_free_leaf_ref(root, old);
1001 ret = btrfs_add_leaf_ref(root, ref, shared);
1004 btrfs_free_leaf_ref(root, ref);
1010 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1011 struct extent_buffer *orig_buf, struct extent_buffer *buf,
1018 u64 orig_generation;
1020 u32 nr_file_extents = 0;
1021 struct btrfs_key key;
1022 struct btrfs_file_extent_item *fi;
1027 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
1028 u64, u64, u64, u64, u64, u64, u64, u64);
1030 ref_root = btrfs_header_owner(buf);
1031 ref_generation = btrfs_header_generation(buf);
1032 orig_root = btrfs_header_owner(orig_buf);
1033 orig_generation = btrfs_header_generation(orig_buf);
1035 nritems = btrfs_header_nritems(buf);
1036 level = btrfs_header_level(buf);
1038 if (root->ref_cows) {
1039 process_func = __btrfs_inc_extent_ref;
1042 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1045 root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1047 process_func = __btrfs_update_extent_ref;
1050 for (i = 0; i < nritems; i++) {
1053 btrfs_item_key_to_cpu(buf, &key, i);
1054 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1056 fi = btrfs_item_ptr(buf, i,
1057 struct btrfs_file_extent_item);
1058 if (btrfs_file_extent_type(buf, fi) ==
1059 BTRFS_FILE_EXTENT_INLINE)
1061 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1067 ret = process_func(trans, root, bytenr,
1068 orig_buf->start, buf->start,
1069 orig_root, ref_root,
1070 orig_generation, ref_generation,
1079 bytenr = btrfs_node_blockptr(buf, i);
1080 ret = process_func(trans, root, bytenr,
1081 orig_buf->start, buf->start,
1082 orig_root, ref_root,
1083 orig_generation, ref_generation,
1095 *nr_extents = nr_file_extents;
1097 *nr_extents = nritems;
1105 int btrfs_update_ref(struct btrfs_trans_handle *trans,
1106 struct btrfs_root *root, struct extent_buffer *orig_buf,
1107 struct extent_buffer *buf, int start_slot, int nr)
1114 u64 orig_generation;
1115 struct btrfs_key key;
1116 struct btrfs_file_extent_item *fi;
1122 BUG_ON(start_slot < 0);
1123 BUG_ON(start_slot + nr > btrfs_header_nritems(buf));
1125 ref_root = btrfs_header_owner(buf);
1126 ref_generation = btrfs_header_generation(buf);
1127 orig_root = btrfs_header_owner(orig_buf);
1128 orig_generation = btrfs_header_generation(orig_buf);
1129 level = btrfs_header_level(buf);
1131 if (!root->ref_cows) {
1133 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1136 root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1140 for (i = 0, slot = start_slot; i < nr; i++, slot++) {
1143 btrfs_item_key_to_cpu(buf, &key, slot);
1144 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1146 fi = btrfs_item_ptr(buf, slot,
1147 struct btrfs_file_extent_item);
1148 if (btrfs_file_extent_type(buf, fi) ==
1149 BTRFS_FILE_EXTENT_INLINE)
1151 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1154 ret = __btrfs_update_extent_ref(trans, root, bytenr,
1155 orig_buf->start, buf->start,
1156 orig_root, ref_root,
1157 orig_generation, ref_generation,
1162 bytenr = btrfs_node_blockptr(buf, slot);
1163 ret = __btrfs_update_extent_ref(trans, root, bytenr,
1164 orig_buf->start, buf->start,
1165 orig_root, ref_root,
1166 orig_generation, ref_generation,
1178 static int write_one_cache_group(struct btrfs_trans_handle *trans,
1179 struct btrfs_root *root,
1180 struct btrfs_path *path,
1181 struct btrfs_block_group_cache *cache)
1185 struct btrfs_root *extent_root = root->fs_info->extent_root;
1187 struct extent_buffer *leaf;
1189 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
1194 leaf = path->nodes[0];
1195 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
1196 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
1197 btrfs_mark_buffer_dirty(leaf);
1198 btrfs_release_path(extent_root, path);
1200 finish_current_insert(trans, extent_root, 0);
1201 pending_ret = del_pending_extents(trans, extent_root, 0);
1210 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
1211 struct btrfs_root *root)
1213 struct btrfs_block_group_cache *cache, *entry;
1217 struct btrfs_path *path;
1220 path = btrfs_alloc_path();
1226 spin_lock(&root->fs_info->block_group_cache_lock);
1227 for (n = rb_first(&root->fs_info->block_group_cache_tree);
1228 n; n = rb_next(n)) {
1229 entry = rb_entry(n, struct btrfs_block_group_cache,
1236 spin_unlock(&root->fs_info->block_group_cache_lock);
1242 last += cache->key.offset;
1244 err = write_one_cache_group(trans, root,
1247 * if we fail to write the cache group, we want
1248 * to keep it marked dirty in hopes that a later
1256 btrfs_free_path(path);
1260 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
1261 u64 total_bytes, u64 bytes_used,
1262 struct btrfs_space_info **space_info)
1264 struct btrfs_space_info *found;
1266 found = __find_space_info(info, flags);
1268 spin_lock(&found->lock);
1269 found->total_bytes += total_bytes;
1270 found->bytes_used += bytes_used;
1272 spin_unlock(&found->lock);
1273 *space_info = found;
1276 found = kmalloc(sizeof(*found), GFP_NOFS);
1280 list_add(&found->list, &info->space_info);
1281 INIT_LIST_HEAD(&found->block_groups);
1282 init_rwsem(&found->groups_sem);
1283 spin_lock_init(&found->lock);
1284 found->flags = flags;
1285 found->total_bytes = total_bytes;
1286 found->bytes_used = bytes_used;
1287 found->bytes_pinned = 0;
1288 found->bytes_reserved = 0;
1290 found->force_alloc = 0;
1291 *space_info = found;
1295 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1297 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
1298 BTRFS_BLOCK_GROUP_RAID1 |
1299 BTRFS_BLOCK_GROUP_RAID10 |
1300 BTRFS_BLOCK_GROUP_DUP);
1302 if (flags & BTRFS_BLOCK_GROUP_DATA)
1303 fs_info->avail_data_alloc_bits |= extra_flags;
1304 if (flags & BTRFS_BLOCK_GROUP_METADATA)
1305 fs_info->avail_metadata_alloc_bits |= extra_flags;
1306 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1307 fs_info->avail_system_alloc_bits |= extra_flags;
1311 static u64 reduce_alloc_profile(struct btrfs_root *root, u64 flags)
1313 u64 num_devices = root->fs_info->fs_devices->num_devices;
1315 if (num_devices == 1)
1316 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
1317 if (num_devices < 4)
1318 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
1320 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
1321 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
1322 BTRFS_BLOCK_GROUP_RAID10))) {
1323 flags &= ~BTRFS_BLOCK_GROUP_DUP;
1326 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
1327 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
1328 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
1331 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
1332 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
1333 (flags & BTRFS_BLOCK_GROUP_RAID10) |
1334 (flags & BTRFS_BLOCK_GROUP_DUP)))
1335 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
1339 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
1340 struct btrfs_root *extent_root, u64 alloc_bytes,
1341 u64 flags, int force)
1343 struct btrfs_space_info *space_info;
1347 int ret = 0, waited = 0;
1349 flags = reduce_alloc_profile(extent_root, flags);
1351 space_info = __find_space_info(extent_root->fs_info, flags);
1353 ret = update_space_info(extent_root->fs_info, flags,
1357 BUG_ON(!space_info);
1359 spin_lock(&space_info->lock);
1360 if (space_info->force_alloc) {
1362 space_info->force_alloc = 0;
1364 if (space_info->full) {
1365 spin_unlock(&space_info->lock);
1369 thresh = div_factor(space_info->total_bytes, 6);
1371 (space_info->bytes_used + space_info->bytes_pinned +
1372 space_info->bytes_reserved + alloc_bytes) < thresh) {
1373 spin_unlock(&space_info->lock);
1377 spin_unlock(&space_info->lock);
1379 ret = mutex_trylock(&extent_root->fs_info->chunk_mutex);
1380 if (!ret && !force) {
1383 mutex_lock(&extent_root->fs_info->chunk_mutex);
1388 spin_lock(&space_info->lock);
1389 if (space_info->full) {
1390 spin_unlock(&space_info->lock);
1393 spin_unlock(&space_info->lock);
1396 ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
1398 printk("space info full %Lu\n", flags);
1399 space_info->full = 1;
1403 ret = btrfs_make_block_group(trans, extent_root, 0, flags,
1404 BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
1407 mutex_unlock(&extent_root->fs_info->chunk_mutex);
1412 static int update_block_group(struct btrfs_trans_handle *trans,
1413 struct btrfs_root *root,
1414 u64 bytenr, u64 num_bytes, int alloc,
1417 struct btrfs_block_group_cache *cache;
1418 struct btrfs_fs_info *info = root->fs_info;
1419 u64 total = num_bytes;
1424 cache = btrfs_lookup_block_group(info, bytenr);
1428 byte_in_group = bytenr - cache->key.objectid;
1429 WARN_ON(byte_in_group > cache->key.offset);
1431 spin_lock(&cache->space_info->lock);
1432 spin_lock(&cache->lock);
1434 old_val = btrfs_block_group_used(&cache->item);
1435 num_bytes = min(total, cache->key.offset - byte_in_group);
1437 old_val += num_bytes;
1438 cache->space_info->bytes_used += num_bytes;
1439 btrfs_set_block_group_used(&cache->item, old_val);
1440 spin_unlock(&cache->lock);
1441 spin_unlock(&cache->space_info->lock);
1443 old_val -= num_bytes;
1444 cache->space_info->bytes_used -= num_bytes;
1445 btrfs_set_block_group_used(&cache->item, old_val);
1446 spin_unlock(&cache->lock);
1447 spin_unlock(&cache->space_info->lock);
1450 ret = btrfs_add_free_space(cache, bytenr,
1457 bytenr += num_bytes;
1462 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
1464 struct btrfs_block_group_cache *cache;
1466 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
1470 return cache->key.objectid;
1473 int btrfs_update_pinned_extents(struct btrfs_root *root,
1474 u64 bytenr, u64 num, int pin)
1477 struct btrfs_block_group_cache *cache;
1478 struct btrfs_fs_info *fs_info = root->fs_info;
1480 WARN_ON(!mutex_is_locked(&root->fs_info->pinned_mutex));
1482 set_extent_dirty(&fs_info->pinned_extents,
1483 bytenr, bytenr + num - 1, GFP_NOFS);
1485 clear_extent_dirty(&fs_info->pinned_extents,
1486 bytenr, bytenr + num - 1, GFP_NOFS);
1489 cache = btrfs_lookup_block_group(fs_info, bytenr);
1491 len = min(num, cache->key.offset -
1492 (bytenr - cache->key.objectid));
1494 spin_lock(&cache->space_info->lock);
1495 spin_lock(&cache->lock);
1496 cache->pinned += len;
1497 cache->space_info->bytes_pinned += len;
1498 spin_unlock(&cache->lock);
1499 spin_unlock(&cache->space_info->lock);
1500 fs_info->total_pinned += len;
1502 spin_lock(&cache->space_info->lock);
1503 spin_lock(&cache->lock);
1504 cache->pinned -= len;
1505 cache->space_info->bytes_pinned -= len;
1506 spin_unlock(&cache->lock);
1507 spin_unlock(&cache->space_info->lock);
1508 fs_info->total_pinned -= len;
1516 static int update_reserved_extents(struct btrfs_root *root,
1517 u64 bytenr, u64 num, int reserve)
1520 struct btrfs_block_group_cache *cache;
1521 struct btrfs_fs_info *fs_info = root->fs_info;
1524 cache = btrfs_lookup_block_group(fs_info, bytenr);
1526 len = min(num, cache->key.offset -
1527 (bytenr - cache->key.objectid));
1529 spin_lock(&cache->space_info->lock);
1530 spin_lock(&cache->lock);
1532 cache->reserved += len;
1533 cache->space_info->bytes_reserved += len;
1535 cache->reserved -= len;
1536 cache->space_info->bytes_reserved -= len;
1538 spin_unlock(&cache->lock);
1539 spin_unlock(&cache->space_info->lock);
1546 int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
1551 struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
1554 mutex_lock(&root->fs_info->pinned_mutex);
1556 ret = find_first_extent_bit(pinned_extents, last,
1557 &start, &end, EXTENT_DIRTY);
1560 set_extent_dirty(copy, start, end, GFP_NOFS);
1563 mutex_unlock(&root->fs_info->pinned_mutex);
1567 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
1568 struct btrfs_root *root,
1569 struct extent_io_tree *unpin)
1574 struct btrfs_block_group_cache *cache;
1576 mutex_lock(&root->fs_info->pinned_mutex);
1578 ret = find_first_extent_bit(unpin, 0, &start, &end,
1582 btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
1583 clear_extent_dirty(unpin, start, end, GFP_NOFS);
1584 cache = btrfs_lookup_block_group(root->fs_info, start);
1586 btrfs_add_free_space(cache, start, end - start + 1);
1587 if (need_resched()) {
1588 mutex_unlock(&root->fs_info->pinned_mutex);
1590 mutex_lock(&root->fs_info->pinned_mutex);
1593 mutex_unlock(&root->fs_info->pinned_mutex);
1597 static int finish_current_insert(struct btrfs_trans_handle *trans,
1598 struct btrfs_root *extent_root, int all)
1604 struct btrfs_fs_info *info = extent_root->fs_info;
1605 struct btrfs_path *path;
1606 struct btrfs_extent_ref *ref;
1607 struct pending_extent_op *extent_op;
1608 struct btrfs_key key;
1609 struct btrfs_extent_item extent_item;
1613 btrfs_set_stack_extent_refs(&extent_item, 1);
1614 path = btrfs_alloc_path();
1617 mutex_lock(&info->extent_ins_mutex);
1618 ret = find_first_extent_bit(&info->extent_ins, search, &start,
1619 &end, EXTENT_WRITEBACK);
1621 mutex_unlock(&info->extent_ins_mutex);
1622 if (search && all) {
1629 ret = try_lock_extent(&info->extent_ins, start, end, GFP_NOFS);
1632 mutex_unlock(&info->extent_ins_mutex);
1638 ret = get_state_private(&info->extent_ins, start, &priv);
1640 extent_op = (struct pending_extent_op *)(unsigned long)priv;
1642 mutex_unlock(&info->extent_ins_mutex);
1644 if (extent_op->type == PENDING_EXTENT_INSERT) {
1645 key.objectid = start;
1646 key.offset = end + 1 - start;
1647 key.type = BTRFS_EXTENT_ITEM_KEY;
1648 err = btrfs_insert_item(trans, extent_root, &key,
1649 &extent_item, sizeof(extent_item));
1652 mutex_lock(&info->extent_ins_mutex);
1653 clear_extent_bits(&info->extent_ins, start, end,
1654 EXTENT_WRITEBACK, GFP_NOFS);
1655 mutex_unlock(&info->extent_ins_mutex);
1657 err = insert_extent_backref(trans, extent_root, path,
1658 start, extent_op->parent,
1659 extent_root->root_key.objectid,
1660 extent_op->generation,
1663 } else if (extent_op->type == PENDING_BACKREF_UPDATE) {
1664 err = lookup_extent_backref(trans, extent_root, path,
1665 start, extent_op->orig_parent,
1666 extent_root->root_key.objectid,
1667 extent_op->orig_generation,
1668 extent_op->level, 0);
1671 mutex_lock(&info->extent_ins_mutex);
1672 clear_extent_bits(&info->extent_ins, start, end,
1673 EXTENT_WRITEBACK, GFP_NOFS);
1674 mutex_unlock(&info->extent_ins_mutex);
1676 key.objectid = start;
1677 key.offset = extent_op->parent;
1678 key.type = BTRFS_EXTENT_REF_KEY;
1679 err = btrfs_set_item_key_safe(trans, extent_root, path,
1682 ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1683 struct btrfs_extent_ref);
1684 btrfs_set_ref_generation(path->nodes[0], ref,
1685 extent_op->generation);
1686 btrfs_mark_buffer_dirty(path->nodes[0]);
1687 btrfs_release_path(extent_root, path);
1692 unlock_extent(&info->extent_ins, start, end, GFP_NOFS);
1700 btrfs_free_path(path);
1704 static int pin_down_bytes(struct btrfs_trans_handle *trans,
1705 struct btrfs_root *root,
1706 u64 bytenr, u64 num_bytes, int is_data)
1709 struct extent_buffer *buf;
1714 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
1718 /* we can reuse a block if it hasn't been written
1719 * and it is from this transaction. We can't
1720 * reuse anything from the tree log root because
1721 * it has tiny sub-transactions.
1723 if (btrfs_buffer_uptodate(buf, 0) &&
1724 btrfs_try_tree_lock(buf)) {
1725 u64 header_owner = btrfs_header_owner(buf);
1726 u64 header_transid = btrfs_header_generation(buf);
1727 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
1728 header_owner != BTRFS_TREE_RELOC_OBJECTID &&
1729 header_transid == trans->transid &&
1730 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
1731 clean_tree_block(NULL, root, buf);
1732 btrfs_tree_unlock(buf);
1733 free_extent_buffer(buf);
1736 btrfs_tree_unlock(buf);
1738 free_extent_buffer(buf);
1740 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
1747 * remove an extent from the root, returns 0 on success
1749 static int __free_extent(struct btrfs_trans_handle *trans,
1750 struct btrfs_root *root,
1751 u64 bytenr, u64 num_bytes, u64 parent,
1752 u64 root_objectid, u64 ref_generation,
1753 u64 owner_objectid, int pin, int mark_free)
1755 struct btrfs_path *path;
1756 struct btrfs_key key;
1757 struct btrfs_fs_info *info = root->fs_info;
1758 struct btrfs_root *extent_root = info->extent_root;
1759 struct extent_buffer *leaf;
1761 int extent_slot = 0;
1762 int found_extent = 0;
1764 struct btrfs_extent_item *ei;
1767 key.objectid = bytenr;
1768 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
1769 key.offset = num_bytes;
1770 path = btrfs_alloc_path();
1775 ret = lookup_extent_backref(trans, extent_root, path,
1776 bytenr, parent, root_objectid,
1777 ref_generation, owner_objectid, 1);
1779 struct btrfs_key found_key;
1780 extent_slot = path->slots[0];
1781 while(extent_slot > 0) {
1783 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1785 if (found_key.objectid != bytenr)
1787 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
1788 found_key.offset == num_bytes) {
1792 if (path->slots[0] - extent_slot > 5)
1795 if (!found_extent) {
1796 ret = remove_extent_backref(trans, extent_root, path);
1798 btrfs_release_path(extent_root, path);
1799 ret = btrfs_search_slot(trans, extent_root,
1802 extent_slot = path->slots[0];
1805 btrfs_print_leaf(extent_root, path->nodes[0]);
1807 printk("Unable to find ref byte nr %Lu root %Lu "
1808 "gen %Lu owner %Lu\n", bytenr,
1809 root_objectid, ref_generation, owner_objectid);
1812 leaf = path->nodes[0];
1813 ei = btrfs_item_ptr(leaf, extent_slot,
1814 struct btrfs_extent_item);
1815 refs = btrfs_extent_refs(leaf, ei);
1818 btrfs_set_extent_refs(leaf, ei, refs);
1820 btrfs_mark_buffer_dirty(leaf);
1822 if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
1823 struct btrfs_extent_ref *ref;
1824 ref = btrfs_item_ptr(leaf, path->slots[0],
1825 struct btrfs_extent_ref);
1826 BUG_ON(btrfs_ref_num_refs(leaf, ref) != 1);
1827 /* if the back ref and the extent are next to each other
1828 * they get deleted below in one shot
1830 path->slots[0] = extent_slot;
1832 } else if (found_extent) {
1833 /* otherwise delete the extent back ref */
1834 ret = remove_extent_backref(trans, extent_root, path);
1836 /* if refs are 0, we need to set up the path for deletion */
1838 btrfs_release_path(extent_root, path);
1839 ret = btrfs_search_slot(trans, extent_root, &key, path,
1848 #ifdef BIO_RW_DISCARD
1849 u64 map_length = num_bytes;
1850 struct btrfs_multi_bio *multi = NULL;
1854 mutex_lock(&root->fs_info->pinned_mutex);
1855 ret = pin_down_bytes(trans, root, bytenr, num_bytes,
1856 owner_objectid >= BTRFS_FIRST_FREE_OBJECTID);
1857 mutex_unlock(&root->fs_info->pinned_mutex);
1863 /* block accounting for super block */
1864 spin_lock_irq(&info->delalloc_lock);
1865 super_used = btrfs_super_bytes_used(&info->super_copy);
1866 btrfs_set_super_bytes_used(&info->super_copy,
1867 super_used - num_bytes);
1868 spin_unlock_irq(&info->delalloc_lock);
1870 /* block accounting for root item */
1871 root_used = btrfs_root_used(&root->root_item);
1872 btrfs_set_root_used(&root->root_item,
1873 root_used - num_bytes);
1874 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
1877 btrfs_release_path(extent_root, path);
1878 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
1882 #ifdef BIO_RW_DISCARD
1883 /* Tell the block device(s) that the sectors can be discarded */
1884 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
1885 bytenr, &map_length, &multi, 0);
1887 struct btrfs_bio_stripe *stripe = multi->stripes;
1890 if (map_length > num_bytes)
1891 map_length = num_bytes;
1893 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1894 blkdev_issue_discard(stripe->dev->bdev,
1895 stripe->physical >> 9,
1902 btrfs_free_path(path);
1903 finish_current_insert(trans, extent_root, 0);
1908 * find all the extents marked as pending deletion and remove
1909 * them from the extent tree
1911 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
1912 btrfs_root *extent_root, int all)
1920 struct extent_io_tree *pending_del;
1921 struct extent_io_tree *extent_ins;
1922 struct pending_extent_op *extent_op;
1923 struct btrfs_fs_info *info = extent_root->fs_info;
1925 extent_ins = &extent_root->fs_info->extent_ins;
1926 pending_del = &extent_root->fs_info->pending_del;
1929 mutex_lock(&info->extent_ins_mutex);
1930 ret = find_first_extent_bit(pending_del, search, &start, &end,
1933 mutex_unlock(&info->extent_ins_mutex);
1934 if (all && search) {
1941 ret = try_lock_extent(extent_ins, start, end, GFP_NOFS);
1944 mutex_unlock(&info->extent_ins_mutex);
1950 ret = get_state_private(pending_del, start, &priv);
1952 extent_op = (struct pending_extent_op *)(unsigned long)priv;
1954 clear_extent_bits(pending_del, start, end, EXTENT_WRITEBACK,
1956 if (!test_range_bit(extent_ins, start, end,
1957 EXTENT_WRITEBACK, 0)) {
1958 mutex_unlock(&info->extent_ins_mutex);
1960 ret = __free_extent(trans, extent_root,
1961 start, end + 1 - start,
1962 extent_op->orig_parent,
1963 extent_root->root_key.objectid,
1964 extent_op->orig_generation,
1965 extent_op->level, 1, 0);
1970 ret = get_state_private(&info->extent_ins, start,
1973 extent_op = (struct pending_extent_op *)
1974 (unsigned long)priv;
1976 clear_extent_bits(&info->extent_ins, start, end,
1977 EXTENT_WRITEBACK, GFP_NOFS);
1979 mutex_unlock(&info->extent_ins_mutex);
1981 if (extent_op->type == PENDING_BACKREF_UPDATE)
1984 mutex_lock(&extent_root->fs_info->pinned_mutex);
1985 ret = pin_down_bytes(trans, extent_root, start,
1986 end + 1 - start, 0);
1987 mutex_unlock(&extent_root->fs_info->pinned_mutex);
1989 ret = update_block_group(trans, extent_root, start,
1990 end + 1 - start, 0, ret > 0);
1997 unlock_extent(extent_ins, start, end, GFP_NOFS);
2009 * remove an extent from the root, returns 0 on success
2011 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2012 struct btrfs_root *root,
2013 u64 bytenr, u64 num_bytes, u64 parent,
2014 u64 root_objectid, u64 ref_generation,
2015 u64 owner_objectid, int pin)
2017 struct btrfs_root *extent_root = root->fs_info->extent_root;
2021 WARN_ON(num_bytes < root->sectorsize);
2022 if (root == extent_root) {
2023 struct pending_extent_op *extent_op;
2025 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2028 extent_op->type = PENDING_EXTENT_DELETE;
2029 extent_op->bytenr = bytenr;
2030 extent_op->num_bytes = num_bytes;
2031 extent_op->parent = parent;
2032 extent_op->orig_parent = parent;
2033 extent_op->generation = ref_generation;
2034 extent_op->orig_generation = ref_generation;
2035 extent_op->level = (int)owner_objectid;
2037 mutex_lock(&root->fs_info->extent_ins_mutex);
2038 set_extent_bits(&root->fs_info->pending_del,
2039 bytenr, bytenr + num_bytes - 1,
2040 EXTENT_WRITEBACK, GFP_NOFS);
2041 set_state_private(&root->fs_info->pending_del,
2042 bytenr, (unsigned long)extent_op);
2043 mutex_unlock(&root->fs_info->extent_ins_mutex);
2046 /* if this is metadata, always pin it */
2047 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
2048 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
2049 struct btrfs_block_group_cache *cache;
2051 /* btrfs_free_reserved_extent */
2052 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
2054 btrfs_add_free_space(cache, bytenr, num_bytes);
2055 update_reserved_extents(root, bytenr, num_bytes, 0);
2061 /* if this is data, pin it unless the reference was created in this transaction */
2062 if (ref_generation != trans->transid)
2065 ret = __free_extent(trans, root, bytenr, num_bytes, parent,
2066 root_objectid, ref_generation,
2067 owner_objectid, pin, pin == 0);
2069 finish_current_insert(trans, root->fs_info->extent_root, 0);
2070 pending_ret = del_pending_extents(trans, root->fs_info->extent_root, 0);
2071 return ret ? ret : pending_ret;
2074 int btrfs_free_extent(struct btrfs_trans_handle *trans,
2075 struct btrfs_root *root,
2076 u64 bytenr, u64 num_bytes, u64 parent,
2077 u64 root_objectid, u64 ref_generation,
2078 u64 owner_objectid, int pin)
2082 ret = __btrfs_free_extent(trans, root, bytenr, num_bytes, parent,
2083 root_objectid, ref_generation,
2084 owner_objectid, pin);
2088 static u64 stripe_align(struct btrfs_root *root, u64 val)
2090 u64 mask = ((u64)root->stripesize - 1);
2091 u64 ret = (val + mask) & ~mask;
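/*
 * Example (made-up numbers): with a 64K stripesize, stripe_align() rounds
 * val = 0x1f000 up to 0x20000, i.e. up to the next stripe boundary.
 */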
2096 * walks the btree of allocated extents and finds a hole of a given size.
2097 * The key ins is changed to record the hole:
2098 * ins->objectid == block start
2099 * ins->flags = BTRFS_EXTENT_ITEM_KEY
2100 * ins->offset == number of blocks
2101 * Any available blocks before search_start are skipped.
2103 static int noinline find_free_extent(struct btrfs_trans_handle *trans,
2104 struct btrfs_root *orig_root,
2105 u64 num_bytes, u64 empty_size,
2106 u64 search_start, u64 search_end,
2107 u64 hint_byte, struct btrfs_key *ins,
2108 u64 exclude_start, u64 exclude_nr,
2112 struct btrfs_root * root = orig_root->fs_info->extent_root;
2113 u64 total_needed = num_bytes;
2114 u64 *last_ptr = NULL;
2115 struct btrfs_block_group_cache *block_group = NULL;
2116 int chunk_alloc_done = 0;
2117 int empty_cluster = 2 * 1024 * 1024;
2118 int allowed_chunk_alloc = 0;
2119 struct list_head *head = NULL, *cur = NULL;
2121 struct btrfs_space_info *space_info;
2123 WARN_ON(num_bytes < root->sectorsize);
2124 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
2128 if (orig_root->ref_cows || empty_size)
2129 allowed_chunk_alloc = 1;
2131 if (data & BTRFS_BLOCK_GROUP_METADATA) {
2132 last_ptr = &root->fs_info->last_alloc;
2133 empty_cluster = 256 * 1024;
2136 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
2137 last_ptr = &root->fs_info->last_data_alloc;
2141 hint_byte = *last_ptr;
2143 empty_size += empty_cluster;
2145 search_start = max(search_start, first_logical_byte(root, 0));
2146 search_start = max(search_start, hint_byte);
2147 total_needed += empty_size;
2149 block_group = btrfs_lookup_block_group(root->fs_info, search_start);
2151 block_group = btrfs_lookup_first_block_group(root->fs_info,
2153 space_info = __find_space_info(root->fs_info, data);
2155 down_read(&space_info->groups_sem);
2157 struct btrfs_free_space *free_space;
2159 * the only way this happens is if our hint points to a block
2160 * group that's not of the proper type; while looping this
2161 * should never happen
2163 WARN_ON(!block_group);
2164 mutex_lock(&block_group->alloc_mutex);
2165 if (unlikely(!block_group_bits(block_group, data)))
2168 ret = cache_block_group(root, block_group);
2170 mutex_unlock(&block_group->alloc_mutex);
2174 if (block_group->ro)
2177 free_space = btrfs_find_free_space(block_group, search_start,
2180 u64 start = block_group->key.objectid;
2181 u64 end = block_group->key.objectid +
2182 block_group->key.offset;
2184 search_start = stripe_align(root, free_space->offset);
2186 /* move on to the next group */
2187 if (search_start + num_bytes >= search_end)
2190 /* move on to the next group */
2191 if (search_start + num_bytes > end)
2194 if (exclude_nr > 0 &&
2195 (search_start + num_bytes > exclude_start &&
2196 search_start < exclude_start + exclude_nr)) {
2197 search_start = exclude_start + exclude_nr;
2199 * if search_start is still in this block group
2200 * then we just re-search this block group
2202 if (search_start >= start &&
2203 search_start < end) {
2204 mutex_unlock(&block_group->alloc_mutex);
2208 /* else we go to the next block group */
2212 ins->objectid = search_start;
2213 ins->offset = num_bytes;
2215 btrfs_remove_free_space_lock(block_group, search_start,
2217 /* we are all good, let's return */
2218 mutex_unlock(&block_group->alloc_mutex);
2222 mutex_unlock(&block_group->alloc_mutex);
2224 * Here's how this works.
2225 * loop == 0: we were searching a block group via a hint
2226 * and didn't find anything, so we start at
2227 * the head of the block groups and keep searching
2228 * loop == 1: we're searching through all of the block groups
2229 * if we hit the head again we have searched
2230 * all of the block groups for this space and we
2231 * need to try to allocate; if we can't, error out.
2232 * loop == 2: we allocated more space and are looping through
2233 * all of the block groups again.
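 *
 * Roughly: loop 0 falls through to loop 1 at the list head; in loop 1,
 * reaching the head again triggers a chunk allocation (when allowed) and
 * one more pass as loop 2; in loop 2, reaching the head again means we
 * give up.
 */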
2236 head = &space_info->block_groups;
2239 if (last_ptr && *last_ptr) {
2240 total_needed += empty_cluster;
2244 } else if (loop == 1 && cur == head) {
2245 if (allowed_chunk_alloc && !chunk_alloc_done) {
2246 up_read(&space_info->groups_sem);
2247 ret = do_chunk_alloc(trans, root, num_bytes +
2248 2 * 1024 * 1024, data, 1);
2251 down_read(&space_info->groups_sem);
2253 head = &space_info->block_groups;
2255 chunk_alloc_done = 1;
2256 } else if (!allowed_chunk_alloc) {
2257 space_info->force_alloc = 1;
2262 } else if (cur == head) {
2266 block_group = list_entry(cur, struct btrfs_block_group_cache,
2268 search_start = block_group->key.objectid;
2272 /* we found what we needed */
2273 if (ins->objectid) {
2274 if (!(data & BTRFS_BLOCK_GROUP_DATA))
2275 trans->block_group = block_group;
2278 *last_ptr = ins->objectid + ins->offset;
2284 up_read(&space_info->groups_sem);
2288 static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
2290 struct btrfs_block_group_cache *cache;
2291 struct list_head *l;
2293 printk(KERN_INFO "space_info has %Lu free, is %sfull\n",
2294 info->total_bytes - info->bytes_used - info->bytes_pinned -
2295 info->bytes_reserved, (info->full) ? "" : "not ");
2297 down_read(&info->groups_sem);
2298 list_for_each(l, &info->block_groups) {
2299 cache = list_entry(l, struct btrfs_block_group_cache, list);
2300 spin_lock(&cache->lock);
2301 printk(KERN_INFO "block group %Lu has %Lu bytes, %Lu used "
2302 "%Lu pinned %Lu reserved\n",
2303 cache->key.objectid, cache->key.offset,
2304 btrfs_block_group_used(&cache->item),
2305 cache->pinned, cache->reserved);
2306 btrfs_dump_free_space(cache, bytes);
2307 spin_unlock(&cache->lock);
2309 up_read(&info->groups_sem);
2312 static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2313 struct btrfs_root *root,
2314 u64 num_bytes, u64 min_alloc_size,
2315 u64 empty_size, u64 hint_byte,
2316 u64 search_end, struct btrfs_key *ins,
2320 u64 search_start = 0;
2322 struct btrfs_fs_info *info = root->fs_info;
2325 alloc_profile = info->avail_data_alloc_bits &
2326 info->data_alloc_profile;
2327 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2328 } else if (root == root->fs_info->chunk_root) {
2329 alloc_profile = info->avail_system_alloc_bits &
2330 info->system_alloc_profile;
2331 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2333 alloc_profile = info->avail_metadata_alloc_bits &
2334 info->metadata_alloc_profile;
2335 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2338 data = reduce_alloc_profile(root, data);
2340 * the only place that sets empty_size is btrfs_realloc_node, which
2341 * is not called recursively on allocations
2343 if (empty_size || root->ref_cows) {
2344 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
2345 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2347 BTRFS_BLOCK_GROUP_METADATA |
2348 (info->metadata_alloc_profile &
2349 info->avail_metadata_alloc_bits), 0);
2351 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2352 num_bytes + 2 * 1024 * 1024, data, 0);
2355 WARN_ON(num_bytes < root->sectorsize);
2356 ret = find_free_extent(trans, root, num_bytes, empty_size,
2357 search_start, search_end, hint_byte, ins,
2358 trans->alloc_exclude_start,
2359 trans->alloc_exclude_nr, data);
2361 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
2362 num_bytes = num_bytes >> 1;
2363 num_bytes = num_bytes & ~(root->sectorsize - 1);
2364 num_bytes = max(num_bytes, min_alloc_size);
2365 do_chunk_alloc(trans, root->fs_info->extent_root,
2366 num_bytes, data, 1);
2370 struct btrfs_space_info *sinfo;
2372 sinfo = __find_space_info(root->fs_info, data);
2373 printk("allocation failed flags %Lu, wanted %Lu\n",
2375 dump_space_info(sinfo, num_bytes);
2382 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
2384 struct btrfs_block_group_cache *cache;
2386 cache = btrfs_lookup_block_group(root->fs_info, start);
2388 printk(KERN_ERR "Unable to find block group for %Lu\n", start);
2391 btrfs_add_free_space(cache, start, len);
2392 update_reserved_extents(root, start, len, 0);
2396 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2397 struct btrfs_root *root,
2398 u64 num_bytes, u64 min_alloc_size,
2399 u64 empty_size, u64 hint_byte,
2400 u64 search_end, struct btrfs_key *ins,
2404 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
2405 empty_size, hint_byte, search_end, ins,
2407 update_reserved_extents(root, ins->objectid, ins->offset, 1);
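/*
 * Caller sketch (illustrative only; 'trans', 'root' and 'hint' are assumed
 * to come from the caller's context): reserve a single 4K data extent and
 * give it back if it ends up unused.
 *
 *	struct btrfs_key ins;
 *	int err;
 *
 *	err = btrfs_reserve_extent(trans, root, 4096, 4096, 0, hint,
 *				   (u64)-1, &ins, BTRFS_BLOCK_GROUP_DATA);
 *	if (err == 0) {
 *		... use [ins.objectid, ins.objectid + ins.offset) ...
 *		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
 *	}
 */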
2411 static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2412 struct btrfs_root *root, u64 parent,
2413 u64 root_objectid, u64 ref_generation,
2414 u64 owner, struct btrfs_key *ins)
2420 u64 num_bytes = ins->offset;
2422 struct btrfs_fs_info *info = root->fs_info;
2423 struct btrfs_root *extent_root = info->extent_root;
2424 struct btrfs_extent_item *extent_item;
2425 struct btrfs_extent_ref *ref;
2426 struct btrfs_path *path;
2427 struct btrfs_key keys[2];
2430 parent = ins->objectid;
2432 /* block accounting for super block */
2433 spin_lock_irq(&info->delalloc_lock);
2434 super_used = btrfs_super_bytes_used(&info->super_copy);
2435 btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
2436 spin_unlock_irq(&info->delalloc_lock);
2438 /* block accounting for root item */
2439 root_used = btrfs_root_used(&root->root_item);
2440 btrfs_set_root_used(&root->root_item, root_used + num_bytes);
2442 if (root == extent_root) {
2443 struct pending_extent_op *extent_op;
2445 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2448 extent_op->type = PENDING_EXTENT_INSERT;
2449 extent_op->bytenr = ins->objectid;
2450 extent_op->num_bytes = ins->offset;
2451 extent_op->parent = parent;
2452 extent_op->orig_parent = 0;
2453 extent_op->generation = ref_generation;
2454 extent_op->orig_generation = 0;
2455 extent_op->level = (int)owner;
2457 mutex_lock(&root->fs_info->extent_ins_mutex);
2458 set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
2459 ins->objectid + ins->offset - 1,
2460 EXTENT_WRITEBACK, GFP_NOFS);
2461 set_state_private(&root->fs_info->extent_ins,
2462 ins->objectid, (unsigned long)extent_op);
2463 mutex_unlock(&root->fs_info->extent_ins_mutex);
2467 memcpy(&keys[0], ins, sizeof(*ins));
2468 keys[1].objectid = ins->objectid;
2469 keys[1].type = BTRFS_EXTENT_REF_KEY;
2470 keys[1].offset = parent;
2471 sizes[0] = sizeof(*extent_item);
2472 sizes[1] = sizeof(*ref);
2474 path = btrfs_alloc_path();
2477 ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
2481 extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2482 struct btrfs_extent_item);
2483 btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
2484 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
2485 struct btrfs_extent_ref);
2487 btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
2488 btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
2489 btrfs_set_ref_objectid(path->nodes[0], ref, owner);
2490 btrfs_set_ref_num_refs(path->nodes[0], ref, 1);
2492 btrfs_mark_buffer_dirty(path->nodes[0]);
2494 trans->alloc_exclude_start = 0;
2495 trans->alloc_exclude_nr = 0;
2496 btrfs_free_path(path);
2497 finish_current_insert(trans, extent_root, 0);
2498 pending_ret = del_pending_extents(trans, extent_root, 0);
2508 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
2510 printk("update block group failed for %Lu %Lu\n",
2511 ins->objectid, ins->offset);
2518 int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2519 struct btrfs_root *root, u64 parent,
2520 u64 root_objectid, u64 ref_generation,
2521 u64 owner, struct btrfs_key *ins)
2525 if (root_objectid == BTRFS_TREE_LOG_OBJECTID)
2527 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
2528 ref_generation, owner, ins);
2529 update_reserved_extents(root, ins->objectid, ins->offset, 0);
2534 * this is used by the tree logging recovery code. It records that
2535 * an extent has been allocated and makes sure to clear the free
2536 * space cache bits as well
2538 int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
2539 struct btrfs_root *root, u64 parent,
2540 u64 root_objectid, u64 ref_generation,
2541 u64 owner, struct btrfs_key *ins)
2544 struct btrfs_block_group_cache *block_group;
2546 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
2547 mutex_lock(&block_group->alloc_mutex);
2548 cache_block_group(root, block_group);
2550 ret = btrfs_remove_free_space_lock(block_group, ins->objectid,
2552 mutex_unlock(&block_group->alloc_mutex);
2554 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
2555 ref_generation, owner, ins);
2560 * finds a free extent and does all the dirty work required for allocation;
2561 * the key for the allocated extent is returned through ins.
2564 * returns 0 if everything worked, non-zero otherwise.
2566 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
2567 struct btrfs_root *root,
2568 u64 num_bytes, u64 parent, u64 min_alloc_size,
2569 u64 root_objectid, u64 ref_generation,
2570 u64 owner_objectid, u64 empty_size, u64 hint_byte,
2571 u64 search_end, struct btrfs_key *ins, u64 data)
2575 ret = __btrfs_reserve_extent(trans, root, num_bytes,
2576 min_alloc_size, empty_size, hint_byte,
2577 search_end, ins, data);
2579 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
2580 ret = __btrfs_alloc_reserved_extent(trans, root, parent,
2581 root_objectid, ref_generation,
2582 owner_objectid, ins);
2586 update_reserved_extents(root, ins->objectid, ins->offset, 1);
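/*
 * initialize a newly allocated tree block: stamp it with the current
 * transid, lock it, clear any stale dirty state and mark it uptodate,
 * then add it to the right dirty pages tree (dirty_log_pages for log
 * tree blocks, the transaction's dirty_pages for everything else).
 */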
2591 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
2592 struct btrfs_root *root,
2593 u64 bytenr, u32 blocksize)
2595 struct extent_buffer *buf;
2597 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
2599 return ERR_PTR(-ENOMEM);
2600 btrfs_set_header_generation(buf, trans->transid);
2601 btrfs_tree_lock(buf);
2602 clean_tree_block(trans, root, buf);
2603 btrfs_set_buffer_uptodate(buf);
2604 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
2605 set_extent_dirty(&root->dirty_log_pages, buf->start,
2606 buf->start + buf->len - 1, GFP_NOFS);
2608 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
2609 buf->start + buf->len - 1, GFP_NOFS);
2611 trans->blocks_used++;
2616 * helper function to allocate a block for a given tree
2617 * returns the tree buffer or an ERR_PTR on failure.
2619 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
2620 struct btrfs_root *root,
2621 u32 blocksize, u64 parent,
2628 struct btrfs_key ins;
2630 struct extent_buffer *buf;
2632 ret = btrfs_alloc_extent(trans, root, blocksize, parent, blocksize,
2633 root_objectid, ref_generation, level,
2634 empty_size, hint, (u64)-1, &ins, 0);
2637 return ERR_PTR(ret);
2640 buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
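/*
 * drop the extent references held by every file extent item in a leaf
 * that is being freed; inline extents and holes (disk_bytenr == 0) are
 * skipped.
 */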
2644 int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
2645 struct btrfs_root *root, struct extent_buffer *leaf)
2648 u64 leaf_generation;
2649 struct btrfs_key key;
2650 struct btrfs_file_extent_item *fi;
2655 BUG_ON(!btrfs_is_leaf(leaf));
2656 nritems = btrfs_header_nritems(leaf);
2657 leaf_owner = btrfs_header_owner(leaf);
2658 leaf_generation = btrfs_header_generation(leaf);
2660 for (i = 0; i < nritems; i++) {
2664 btrfs_item_key_to_cpu(leaf, &key, i);
2665 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2667 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
2668 if (btrfs_file_extent_type(leaf, fi) ==
2669 BTRFS_FILE_EXTENT_INLINE)
2672 * FIXME make sure to insert a trans record that
2673 * repeats the snapshot del on crash
2675 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
2676 if (disk_bytenr == 0)
2679 ret = __btrfs_free_extent(trans, root, disk_bytenr,
2680 btrfs_file_extent_disk_num_bytes(leaf, fi),
2681 leaf->start, leaf_owner, leaf_generation,
2685 atomic_inc(&root->fs_info->throttle_gen);
2686 wake_up(&root->fs_info->transaction_throttle);
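/*
 * like btrfs_drop_leaf_ref, but driven from a cached btrfs_leaf_ref so
 * the extents can be dropped without reading the leaf itself.
 */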
2692 static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
2693 struct btrfs_root *root,
2694 struct btrfs_leaf_ref *ref)
2698 struct btrfs_extent_info *info = ref->extents;
2700 for (i = 0; i < ref->nritems; i++) {
2701 ret = __btrfs_free_extent(trans, root, info->bytenr,
2702 info->num_bytes, ref->bytenr,
2703 ref->owner, ref->generation,
2706 atomic_inc(&root->fs_info->throttle_gen);
2707 wake_up(&root->fs_info->transaction_throttle);
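/* look up the current reference count on an extent for the snapshot-drop walkers */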
2717 int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
2722 ret = btrfs_lookup_extent_ref(NULL, root, start, len, refs);
2725 #if 0 /* some debugging code in case we see problems here */
2726 /* if the refs count is one, it won't get increased again. But
2727 * if the ref count is > 1, someone may be decreasing it at
2728 * the same time we are.
2731 struct extent_buffer *eb = NULL;
2732 eb = btrfs_find_create_tree_block(root, start, len);
2734 btrfs_tree_lock(eb);
2736 mutex_lock(&root->fs_info->alloc_mutex);
2737 ret = lookup_extent_ref(NULL, root, start, len, refs);
2739 mutex_unlock(&root->fs_info->alloc_mutex);
2742 btrfs_tree_unlock(eb);
2743 free_extent_buffer(eb);
2746 printk("block %llu went down to one during drop_snap\n",
2747 (unsigned long long)start);
2758 * helper function for drop_snapshot, this walks down the tree dropping ref
2759 * counts as it goes.
2761 static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
2762 struct btrfs_root *root,
2763 struct btrfs_path *path, int *level)
2769 struct extent_buffer *next;
2770 struct extent_buffer *cur;
2771 struct extent_buffer *parent;
2772 struct btrfs_leaf_ref *ref;
2777 WARN_ON(*level < 0);
2778 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2779 ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
2780 path->nodes[*level]->len, &refs);
2786 * walk down to the last node level and free all the leaves
2788 while(*level >= 0) {
2789 WARN_ON(*level < 0);
2790 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2791 cur = path->nodes[*level];
2793 if (btrfs_header_level(cur) != *level)
2796 if (path->slots[*level] >=
2797 btrfs_header_nritems(cur))
2800 ret = btrfs_drop_leaf_ref(trans, root, cur);
2804 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2805 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2806 blocksize = btrfs_level_size(root, *level - 1);
2808 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
2811 parent = path->nodes[*level];
2812 root_owner = btrfs_header_owner(parent);
2813 root_gen = btrfs_header_generation(parent);
2814 path->slots[*level]++;
2816 ret = __btrfs_free_extent(trans, root, bytenr,
2817 blocksize, parent->start,
2818 root_owner, root_gen,
2822 atomic_inc(&root->fs_info->throttle_gen);
2823 wake_up(&root->fs_info->transaction_throttle);
2829 * at this point, we have a single ref, and since the
2830 * only place referencing this extent is a dead root
2831 * the reference count should never go higher.
2832 * So, we don't need to check it again
2835 ref = btrfs_lookup_leaf_ref(root, bytenr);
2836 if (ref && ref->generation != ptr_gen) {
2837 btrfs_free_leaf_ref(root, ref);
2841 ret = cache_drop_leaf_ref(trans, root, ref);
2843 btrfs_remove_leaf_ref(root, ref);
2844 btrfs_free_leaf_ref(root, ref);
2848 if (printk_ratelimit()) {
2849 printk("leaf ref miss for bytenr %llu\n",
2850 (unsigned long long)bytenr);
2853 next = btrfs_find_tree_block(root, bytenr, blocksize);
2854 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
2855 free_extent_buffer(next);
2857 next = read_tree_block(root, bytenr, blocksize,
2862 * this is a debugging check and can go away
2863 * the ref should never go all the way down to 1
2866 ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
2872 WARN_ON(*level <= 0);
2873 if (path->nodes[*level-1])
2874 free_extent_buffer(path->nodes[*level-1]);
2875 path->nodes[*level-1] = next;
2876 *level = btrfs_header_level(next);
2877 path->slots[*level] = 0;
2881 WARN_ON(*level < 0);
2882 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2884 if (path->nodes[*level] == root->node) {
2885 parent = path->nodes[*level];
2886 bytenr = path->nodes[*level]->start;
2888 parent = path->nodes[*level + 1];
2889 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
2892 blocksize = btrfs_level_size(root, *level);
2893 root_owner = btrfs_header_owner(parent);
2894 root_gen = btrfs_header_generation(parent);
2896 ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
2897 parent->start, root_owner, root_gen,
2899 free_extent_buffer(path->nodes[*level]);
2900 path->nodes[*level] = NULL;
2909 * helper function for drop_subtree, this function is similar to
2910 * walk_down_tree. The main difference is that it checks reference
2911 * counts while tree blocks are locked.
2913 static int noinline walk_down_subtree(struct btrfs_trans_handle *trans,
2914 struct btrfs_root *root,
2915 struct btrfs_path *path, int *level)
2917 struct extent_buffer *next;
2918 struct extent_buffer *cur;
2919 struct extent_buffer *parent;
2926 cur = path->nodes[*level];
2927 ret = btrfs_lookup_extent_ref(trans, root, cur->start, cur->len,
2933 while (*level >= 0) {
2934 cur = path->nodes[*level];
2936 ret = btrfs_drop_leaf_ref(trans, root, cur);
2938 clean_tree_block(trans, root, cur);
2941 if (path->slots[*level] >= btrfs_header_nritems(cur)) {
2942 clean_tree_block(trans, root, cur);
2946 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2947 blocksize = btrfs_level_size(root, *level - 1);
2948 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2950 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
2951 btrfs_tree_lock(next);
2953 ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
2957 parent = path->nodes[*level];
2958 ret = btrfs_free_extent(trans, root, bytenr,
2959 blocksize, parent->start,
2960 btrfs_header_owner(parent),
2961 btrfs_header_generation(parent),
2964 path->slots[*level]++;
2965 btrfs_tree_unlock(next);
2966 free_extent_buffer(next);
2970 *level = btrfs_header_level(next);
2971 path->nodes[*level] = next;
2972 path->slots[*level] = 0;
2973 path->locks[*level] = 1;
2977 parent = path->nodes[*level + 1];
2978 bytenr = path->nodes[*level]->start;
2979 blocksize = path->nodes[*level]->len;
2981 ret = btrfs_free_extent(trans, root, bytenr, blocksize,
2982 parent->start, btrfs_header_owner(parent),
2983 btrfs_header_generation(parent), *level, 1);
2986 if (path->locks[*level]) {
2987 btrfs_tree_unlock(path->nodes[*level]);
2988 path->locks[*level] = 0;
2990 free_extent_buffer(path->nodes[*level]);
2991 path->nodes[*level] = NULL;
2998 * helper for dropping snapshots. This walks back up the tree in the path
2999 * to find the first node higher up where we haven't yet gone through all the slots
3002 static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
3003 struct btrfs_root *root,
3004 struct btrfs_path *path,
3005 int *level, int max_level)
3009 struct btrfs_root_item *root_item = &root->root_item;
3014 for (i = *level; i < max_level && path->nodes[i]; i++) {
3015 slot = path->slots[i];
3016 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
3017 struct extent_buffer *node;
3018 struct btrfs_disk_key disk_key;
3019 node = path->nodes[i];
3022 WARN_ON(*level == 0);
3023 btrfs_node_key(node, &disk_key, path->slots[i]);
3024 memcpy(&root_item->drop_progress,
3025 &disk_key, sizeof(disk_key));
3026 root_item->drop_level = i;
3029 struct extent_buffer *parent;
3030 if (path->nodes[*level] == root->node)
3031 parent = path->nodes[*level];
3033 parent = path->nodes[*level + 1];
3035 root_owner = btrfs_header_owner(parent);
3036 root_gen = btrfs_header_generation(parent);
3038 clean_tree_block(trans, root, path->nodes[*level]);
3039 ret = btrfs_free_extent(trans, root,
3040 path->nodes[*level]->start,
3041 path->nodes[*level]->len,
3042 parent->start, root_owner,
3043 root_gen, *level, 1);
3045 if (path->locks[*level]) {
3046 btrfs_tree_unlock(path->nodes[*level]);
3047 path->locks[*level] = 0;
3049 free_extent_buffer(path->nodes[*level]);
3050 path->nodes[*level] = NULL;
3058 * drop the reference count on the tree rooted at 'snap'. This traverses
3059 * the tree freeing any blocks that have a ref count of zero after being decremented.
3062 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
3068 struct btrfs_path *path;
3071 struct btrfs_root_item *root_item = &root->root_item;
3073 WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
3074 path = btrfs_alloc_path();
3077 level = btrfs_header_level(root->node);
3079 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
3080 path->nodes[level] = root->node;
3081 extent_buffer_get(root->node);
3082 path->slots[level] = 0;
3084 struct btrfs_key key;
3085 struct btrfs_disk_key found_key;
3086 struct extent_buffer *node;
3088 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
3089 level = root_item->drop_level;
3090 path->lowest_level = level;
3091 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3096 node = path->nodes[level];
3097 btrfs_node_key(node, &found_key, path->slots[level]);
3098 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
3099 sizeof(found_key)));
3101 * unlock our path, this is safe because only this
3102 * function is allowed to delete this snapshot
3104 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
3105 if (path->nodes[i] && path->locks[i]) {
3107 btrfs_tree_unlock(path->nodes[i]);
3112 wret = walk_down_tree(trans, root, path, &level);
3118 wret = walk_up_tree(trans, root, path, &level,
3124 if (trans->transaction->in_commit) {
3128 atomic_inc(&root->fs_info->throttle_gen);
3129 wake_up(&root->fs_info->transaction_throttle);
3131 for (i = 0; i <= orig_level; i++) {
3132 if (path->nodes[i]) {
3133 free_extent_buffer(path->nodes[i]);
3134 path->nodes[i] = NULL;
3138 btrfs_free_path(path);
3142 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
3143 struct btrfs_root *root,
3144 struct extent_buffer *node,
3145 struct extent_buffer *parent)
3147 struct btrfs_path *path;
3153 path = btrfs_alloc_path();
3156 BUG_ON(!btrfs_tree_locked(parent));
3157 parent_level = btrfs_header_level(parent);
3158 extent_buffer_get(parent);
3159 path->nodes[parent_level] = parent;
3160 path->slots[parent_level] = btrfs_header_nritems(parent);
3162 BUG_ON(!btrfs_tree_locked(node));
3163 level = btrfs_header_level(node);
3164 extent_buffer_get(node);
3165 path->nodes[level] = node;
3166 path->slots[level] = 0;
3169 wret = walk_down_subtree(trans, root, path, &level);
3175 wret = walk_up_tree(trans, root, path, &level, parent_level);
3182 btrfs_free_path(path);
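/* clamp a readahead window of nr pages so it doesn't run past last */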
3186 static unsigned long calc_ra(unsigned long start, unsigned long last,
3189 return min(last, start + nr - 1);
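/*
 * read the pages covering [start, start + len) of the relocation inode
 * into the page cache and dirty them as delalloc, so the data gets
 * written out again at its new location.
 */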
3192 static int noinline relocate_inode_pages(struct inode *inode, u64 start,
3197 unsigned long first_index;
3198 unsigned long last_index;
3201 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3202 struct file_ra_state *ra;
3203 struct btrfs_ordered_extent *ordered;
3204 unsigned int total_read = 0;
3205 unsigned int total_dirty = 0;
3208 ra = kzalloc(sizeof(*ra), GFP_NOFS);
3210 mutex_lock(&inode->i_mutex);
3211 first_index = start >> PAGE_CACHE_SHIFT;
3212 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
3214 /* make sure the dirty trick played by the caller works */
3215 ret = invalidate_inode_pages2_range(inode->i_mapping,
3216 first_index, last_index);
3220 file_ra_state_init(ra, inode->i_mapping);
3222 for (i = first_index ; i <= last_index; i++) {
3223 if (total_read % ra->ra_pages == 0) {
3224 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
3225 calc_ra(i, last_index, ra->ra_pages));
3229 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
3231 page = grab_cache_page(inode->i_mapping, i);
3236 if (!PageUptodate(page)) {
3237 btrfs_readpage(NULL, page);
3239 if (!PageUptodate(page)) {
3241 page_cache_release(page);
3246 wait_on_page_writeback(page);
3248 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
3249 page_end = page_start + PAGE_CACHE_SIZE - 1;
3250 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3252 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3254 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3256 page_cache_release(page);
3257 btrfs_start_ordered_extent(inode, ordered, 1);
3258 btrfs_put_ordered_extent(ordered);
3261 set_page_extent_mapped(page);
3263 btrfs_set_extent_delalloc(inode, page_start, page_end);
3264 if (i == first_index)
3265 set_extent_bits(io_tree, page_start, page_end,
3266 EXTENT_BOUNDARY, GFP_NOFS);
3268 set_page_dirty(page);
3271 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3273 page_cache_release(page);
3278 mutex_unlock(&inode->i_mutex);
3279 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
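/*
 * install a pinned extent mapping for the extent being relocated and
 * pull its pages into the relocation inode via relocate_inode_pages.
 */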
3283 static int noinline relocate_data_extent(struct inode *reloc_inode,
3284 struct btrfs_key *extent_key,
3287 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
3288 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
3289 struct extent_map *em;
3290 u64 start = extent_key->objectid - offset;
3291 u64 end = start + extent_key->offset - 1;
3293 em = alloc_extent_map(GFP_NOFS);
3294 BUG_ON(!em || IS_ERR(em));
3297 em->len = extent_key->offset;
3298 em->block_len = extent_key->offset;
3299 em->block_start = extent_key->objectid;
3300 em->bdev = root->fs_info->fs_devices->latest_bdev;
3301 set_bit(EXTENT_FLAG_PINNED, &em->flags);
3303 /* setup extent map to cheat btrfs_readpage */
3304 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
3307 spin_lock(&em_tree->lock);
3308 ret = add_extent_mapping(em_tree, em);
3309 spin_unlock(&em_tree->lock);
3310 if (ret != -EEXIST) {
3311 free_extent_map(em);
3314 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
3316 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
3318 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
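/*
 * btrfs_ref_path describes one chain of backrefs leading from an extent
 * up to a tree root; __next_ref_path below walks these chains one at a
 * time.
 */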
3321 struct btrfs_ref_path {
3323 u64 nodes[BTRFS_MAX_LEVEL];
3325 u64 root_generation;
3332 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
3333 u64 new_nodes[BTRFS_MAX_LEVEL];
3336 struct disk_extent {
3347 static int is_cowonly_root(u64 root_objectid)
3349 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
3350 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
3351 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
3352 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
3353 root_objectid == BTRFS_TREE_LOG_OBJECTID)
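/*
 * advance ref_path to the next backref chain for the extent: walk back
 * down to where the previous chain diverged, then follow EXTENT_REF
 * items upward until a tree root (or a ref created in the running
 * transaction) is reached.
 */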
3358 static int noinline __next_ref_path(struct btrfs_trans_handle *trans,
3359 struct btrfs_root *extent_root,
3360 struct btrfs_ref_path *ref_path,
3363 struct extent_buffer *leaf;
3364 struct btrfs_path *path;
3365 struct btrfs_extent_ref *ref;
3366 struct btrfs_key key;
3367 struct btrfs_key found_key;
3373 path = btrfs_alloc_path();
3378 ref_path->lowest_level = -1;
3379 ref_path->current_level = -1;
3380 ref_path->shared_level = -1;
3384 level = ref_path->current_level - 1;
3385 while (level >= -1) {
3387 if (level < ref_path->lowest_level)
3391 bytenr = ref_path->nodes[level];
3393 bytenr = ref_path->extent_start;
3395 BUG_ON(bytenr == 0);
3397 parent = ref_path->nodes[level + 1];
3398 ref_path->nodes[level + 1] = 0;
3399 ref_path->current_level = level;
3400 BUG_ON(parent == 0);
3402 key.objectid = bytenr;
3403 key.offset = parent + 1;
3404 key.type = BTRFS_EXTENT_REF_KEY;
3406 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
3411 leaf = path->nodes[0];
3412 nritems = btrfs_header_nritems(leaf);
3413 if (path->slots[0] >= nritems) {
3414 ret = btrfs_next_leaf(extent_root, path);
3419 leaf = path->nodes[0];
3422 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3423 if (found_key.objectid == bytenr &&
3424 found_key.type == BTRFS_EXTENT_REF_KEY) {
3425 if (level < ref_path->shared_level)
3426 ref_path->shared_level = level;
3431 btrfs_release_path(extent_root, path);
3434 /* reached lowest level */
3438 level = ref_path->current_level;
3439 while (level < BTRFS_MAX_LEVEL - 1) {
3442 bytenr = ref_path->nodes[level];
3444 bytenr = ref_path->extent_start;
3446 BUG_ON(bytenr == 0);
3448 key.objectid = bytenr;
3450 key.type = BTRFS_EXTENT_REF_KEY;
3452 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
3456 leaf = path->nodes[0];
3457 nritems = btrfs_header_nritems(leaf);
3458 if (path->slots[0] >= nritems) {
3459 ret = btrfs_next_leaf(extent_root, path);
3463 /* the extent was freed by someone */
3464 if (ref_path->lowest_level == level)
3466 btrfs_release_path(extent_root, path);
3469 leaf = path->nodes[0];
3472 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3473 if (found_key.objectid != bytenr ||
3474 found_key.type != BTRFS_EXTENT_REF_KEY) {
3475 /* the extent was freed by someone */
3476 if (ref_path->lowest_level == level) {
3480 btrfs_release_path(extent_root, path);
3484 ref = btrfs_item_ptr(leaf, path->slots[0],
3485 struct btrfs_extent_ref);
3486 ref_objectid = btrfs_ref_objectid(leaf, ref);
3487 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
3489 level = (int)ref_objectid;
3490 BUG_ON(level >= BTRFS_MAX_LEVEL);
3491 ref_path->lowest_level = level;
3492 ref_path->current_level = level;
3493 ref_path->nodes[level] = bytenr;
3495 WARN_ON(ref_objectid != level);
3498 WARN_ON(level != -1);
3502 if (ref_path->lowest_level == level) {
3503 ref_path->owner_objectid = ref_objectid;
3504 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
3508 * the block is tree root or the block isn't in reference counted roots.
3511 if (found_key.objectid == found_key.offset ||
3512 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
3513 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
3514 ref_path->root_generation =
3515 btrfs_ref_generation(leaf, ref);
3517 /* special reference from the tree log */
3518 ref_path->nodes[0] = found_key.offset;
3519 ref_path->current_level = 0;
3526 BUG_ON(ref_path->nodes[level] != 0);
3527 ref_path->nodes[level] = found_key.offset;
3528 ref_path->current_level = level;
3531 * the reference was created in the running transaction,
3532 * no need to continue walking up.
3534 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
3535 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
3536 ref_path->root_generation =
3537 btrfs_ref_generation(leaf, ref);
3542 btrfs_release_path(extent_root, path);
3545 /* reached max tree level, but no tree root found. */
3548 btrfs_free_path(path);
3552 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
3553 struct btrfs_root *extent_root,
3554 struct btrfs_ref_path *ref_path,
3557 memset(ref_path, 0, sizeof(*ref_path));
3558 ref_path->extent_start = extent_start;
3560 return __next_ref_path(trans, extent_root, ref_path, 1);
3563 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
3564 struct btrfs_root *extent_root,
3565 struct btrfs_ref_path *ref_path)
3567 return __next_ref_path(trans, extent_root, ref_path, 0);
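/*
 * collect the file extents in the relocation inode that cover the
 * extent being relocated; these describe the data's new locations on
 * disk.
 */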
3570 static int noinline get_new_locations(struct inode *reloc_inode,
3571 struct btrfs_key *extent_key,
3572 u64 offset, int no_fragment,
3573 struct disk_extent **extents,
3576 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
3577 struct btrfs_path *path;
3578 struct btrfs_file_extent_item *fi;
3579 struct extent_buffer *leaf;
3580 struct disk_extent *exts = *extents;
3581 struct btrfs_key found_key;
3586 int max = *nr_extents;
3589 WARN_ON(!no_fragment && *extents);
3592 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
3597 path = btrfs_alloc_path();
3600 cur_pos = extent_key->objectid - offset;
3601 last_byte = extent_key->objectid + extent_key->offset;
3602 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
3612 leaf = path->nodes[0];
3613 nritems = btrfs_header_nritems(leaf);
3614 if (path->slots[0] >= nritems) {
3615 ret = btrfs_next_leaf(root, path);
3620 leaf = path->nodes[0];
3623 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3624 if (found_key.offset != cur_pos ||
3625 found_key.type != BTRFS_EXTENT_DATA_KEY ||
3626 found_key.objectid != reloc_inode->i_ino)
3629 fi = btrfs_item_ptr(leaf, path->slots[0],
3630 struct btrfs_file_extent_item);
3631 if (btrfs_file_extent_type(leaf, fi) !=
3632 BTRFS_FILE_EXTENT_REG ||
3633 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
3637 struct disk_extent *old = exts;
3639 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
3640 memcpy(exts, old, sizeof(*exts) * nr);
3641 if (old != *extents)
3645 exts[nr].disk_bytenr =
3646 btrfs_file_extent_disk_bytenr(leaf, fi);
3647 exts[nr].disk_num_bytes =
3648 btrfs_file_extent_disk_num_bytes(leaf, fi);
3649 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
3650 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
3651 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
3652 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
3653 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
3654 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
3656 BUG_ON(exts[nr].offset > 0);
3657 BUG_ON(exts[nr].compression || exts[nr].encryption);
3658 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
3660 cur_pos += exts[nr].num_bytes;
3663 if (cur_pos + offset >= last_byte)
3673 WARN_ON(cur_pos + offset > last_byte);
3674 if (cur_pos + offset < last_byte) {
3680 btrfs_free_path(path);
3682 if (exts != *extents)
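/*
 * rewrite the file extent items of one reference path so they point at
 * the relocated copy (or copies) of the extent, locking each file
 * extent range while it is updated and fixing up the extent refs.
 */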
3691 static int noinline replace_one_extent(struct btrfs_trans_handle *trans,
3692 struct btrfs_root *root,
3693 struct btrfs_path *path,
3694 struct btrfs_key *extent_key,
3695 struct btrfs_key *leaf_key,
3696 struct btrfs_ref_path *ref_path,
3697 struct disk_extent *new_extents,
3700 struct extent_buffer *leaf;
3701 struct btrfs_file_extent_item *fi;
3702 struct inode *inode = NULL;
3703 struct btrfs_key key;
3711 int extent_locked = 0;
3715 memcpy(&key, leaf_key, sizeof(key));
3716 first_pos = INT_LIMIT(loff_t) - extent_key->offset;
3717 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
3718 if (key.objectid < ref_path->owner_objectid ||
3719 (key.objectid == ref_path->owner_objectid &&
3720 key.type < BTRFS_EXTENT_DATA_KEY)) {
3721 key.objectid = ref_path->owner_objectid;
3722 key.type = BTRFS_EXTENT_DATA_KEY;
3728 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3732 leaf = path->nodes[0];
3733 nritems = btrfs_header_nritems(leaf);
3735 if (extent_locked && ret > 0) {
3737 * the file extent item was modified by someone
3738 * before the extent got locked.
3740 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
3741 lock_end, GFP_NOFS);
3745 if (path->slots[0] >= nritems) {
3746 if (++nr_scaned > 2)
3749 BUG_ON(extent_locked);
3750 ret = btrfs_next_leaf(root, path);
3755 leaf = path->nodes[0];
3756 nritems = btrfs_header_nritems(leaf);
3759 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3761 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
3762 if ((key.objectid > ref_path->owner_objectid) ||
3763 (key.objectid == ref_path->owner_objectid &&
3764 key.type > BTRFS_EXTENT_DATA_KEY) ||
3765 (key.offset >= first_pos + extent_key->offset))
3769 if (inode && key.objectid != inode->i_ino) {
3770 BUG_ON(extent_locked);
3771 btrfs_release_path(root, path);
3772 mutex_unlock(&inode->i_mutex);
3778 if (key.type != BTRFS_EXTENT_DATA_KEY) {
3783 fi = btrfs_item_ptr(leaf, path->slots[0],
3784 struct btrfs_file_extent_item);
3785 extent_type = btrfs_file_extent_type(leaf, fi);
3786 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
3787 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
3788 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
3789 extent_key->objectid)) {
3795 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
3796 ext_offset = btrfs_file_extent_offset(leaf, fi);
3798 if (first_pos > key.offset - ext_offset)
3799 first_pos = key.offset - ext_offset;
3801 if (!extent_locked) {
3802 lock_start = key.offset;
3803 lock_end = lock_start + num_bytes - 1;
3805 if (lock_start > key.offset ||
3806 lock_end + 1 < key.offset + num_bytes) {
3807 unlock_extent(&BTRFS_I(inode)->io_tree,
3808 lock_start, lock_end, GFP_NOFS);
3814 btrfs_release_path(root, path);
3816 inode = btrfs_iget_locked(root->fs_info->sb,
3817 key.objectid, root);
3818 if (inode->i_state & I_NEW) {
3819 BTRFS_I(inode)->root = root;
3820 BTRFS_I(inode)->location.objectid =
3822 BTRFS_I(inode)->location.type =
3823 BTRFS_INODE_ITEM_KEY;
3824 BTRFS_I(inode)->location.offset = 0;
3825 btrfs_read_locked_inode(inode);
3826 unlock_new_inode(inode);
3829 * some code calls btrfs_commit_transaction while
3830 * holding the i_mutex, so we can't use mutex_lock here.
3833 if (is_bad_inode(inode) ||
3834 !mutex_trylock(&inode->i_mutex)) {
3837 key.offset = (u64)-1;
3842 if (!extent_locked) {
3843 struct btrfs_ordered_extent *ordered;
3845 btrfs_release_path(root, path);
3847 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
3848 lock_end, GFP_NOFS);
3849 ordered = btrfs_lookup_first_ordered_extent(inode,
3852 ordered->file_offset <= lock_end &&
3853 ordered->file_offset + ordered->len > lock_start) {
3854 unlock_extent(&BTRFS_I(inode)->io_tree,
3855 lock_start, lock_end, GFP_NOFS);
3856 btrfs_start_ordered_extent(inode, ordered, 1);
3857 btrfs_put_ordered_extent(ordered);
3858 key.offset += num_bytes;
3862 btrfs_put_ordered_extent(ordered);
3868 if (nr_extents == 1) {
3869 /* update extent pointer in place */
3870 btrfs_set_file_extent_disk_bytenr(leaf, fi,
3871 new_extents[0].disk_bytenr);
3872 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
3873 new_extents[0].disk_num_bytes);
3874 btrfs_mark_buffer_dirty(leaf);
3876 btrfs_drop_extent_cache(inode, key.offset,
3877 key.offset + num_bytes - 1, 0);
3879 ret = btrfs_inc_extent_ref(trans, root,
3880 new_extents[0].disk_bytenr,
3881 new_extents[0].disk_num_bytes,
3883 root->root_key.objectid,
3888 ret = btrfs_free_extent(trans, root,
3889 extent_key->objectid,
3892 btrfs_header_owner(leaf),
3893 btrfs_header_generation(leaf),
3897 btrfs_release_path(root, path);
3898 key.offset += num_bytes;
3906 * drop old extent pointer at first, then insert the
3907 * new pointers one by one
3909 btrfs_release_path(root, path);
3910 ret = btrfs_drop_extents(trans, root, inode, key.offset,
3911 key.offset + num_bytes,
3912 key.offset, &alloc_hint);
3915 for (i = 0; i < nr_extents; i++) {
3916 if (ext_offset >= new_extents[i].num_bytes) {
3917 ext_offset -= new_extents[i].num_bytes;
3920 extent_len = min(new_extents[i].num_bytes -
3921 ext_offset, num_bytes);
3923 ret = btrfs_insert_empty_item(trans, root,
3928 leaf = path->nodes[0];
3929 fi = btrfs_item_ptr(leaf, path->slots[0],
3930 struct btrfs_file_extent_item);
3931 btrfs_set_file_extent_generation(leaf, fi,
3933 btrfs_set_file_extent_type(leaf, fi,
3934 BTRFS_FILE_EXTENT_REG);
3935 btrfs_set_file_extent_disk_bytenr(leaf, fi,
3936 new_extents[i].disk_bytenr);
3937 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
3938 new_extents[i].disk_num_bytes);
3939 btrfs_set_file_extent_ram_bytes(leaf, fi,
3940 new_extents[i].ram_bytes);
3942 btrfs_set_file_extent_compression(leaf, fi,
3943 new_extents[i].compression);
3944 btrfs_set_file_extent_encryption(leaf, fi,
3945 new_extents[i].encryption);
3946 btrfs_set_file_extent_other_encoding(leaf, fi,
3947 new_extents[i].other_encoding);
3949 btrfs_set_file_extent_num_bytes(leaf, fi,
3951 ext_offset += new_extents[i].offset;
3952 btrfs_set_file_extent_offset(leaf, fi,
3954 btrfs_mark_buffer_dirty(leaf);
3956 btrfs_drop_extent_cache(inode, key.offset,
3957 key.offset + extent_len - 1, 0);
3959 ret = btrfs_inc_extent_ref(trans, root,
3960 new_extents[i].disk_bytenr,
3961 new_extents[i].disk_num_bytes,
3963 root->root_key.objectid,
3964 trans->transid, key.objectid);
3966 btrfs_release_path(root, path);
3968 inode_add_bytes(inode, extent_len);
3971 num_bytes -= extent_len;
3972 key.offset += extent_len;
3977 BUG_ON(i >= nr_extents);
3981 if (extent_locked) {
3982 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
3983 lock_end, GFP_NOFS);
3987 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
3988 key.offset >= first_pos + extent_key->offset)
3995 btrfs_release_path(root, path);
3997 mutex_unlock(&inode->i_mutex);
3998 if (extent_locked) {
3999 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4000 lock_end, GFP_NOFS);
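/*
 * when a block in a reloc tree is COWed, copy the cached leaf ref of
 * the original block over to the new block so the ref cache stays
 * valid.
 */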
4007 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
4008 struct btrfs_root *root,
4009 struct extent_buffer *buf, u64 orig_start)
4014 BUG_ON(btrfs_header_generation(buf) != trans->transid);
4015 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
4017 level = btrfs_header_level(buf);
4019 struct btrfs_leaf_ref *ref;
4020 struct btrfs_leaf_ref *orig_ref;
4022 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
4026 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
4028 btrfs_free_leaf_ref(root, orig_ref);
4032 ref->nritems = orig_ref->nritems;
4033 memcpy(ref->extents, orig_ref->extents,
4034 sizeof(ref->extents[0]) * ref->nritems);
4036 btrfs_free_leaf_ref(root, orig_ref);
4038 ref->root_gen = trans->transid;
4039 ref->bytenr = buf->start;
4040 ref->owner = btrfs_header_owner(buf);
4041 ref->generation = btrfs_header_generation(buf);
4042 ret = btrfs_add_leaf_ref(root, ref, 0);
4044 btrfs_free_leaf_ref(root, ref);
4049 static int noinline invalidate_extent_cache(struct btrfs_root *root,
4050 struct extent_buffer *leaf,
4051 struct btrfs_block_group_cache *group,
4052 struct btrfs_root *target_root)
4054 struct btrfs_key key;
4055 struct inode *inode = NULL;
4056 struct btrfs_file_extent_item *fi;
4058 u64 skip_objectid = 0;
4062 nritems = btrfs_header_nritems(leaf);
4063 for (i = 0; i < nritems; i++) {
4064 btrfs_item_key_to_cpu(leaf, &key, i);
4065 if (key.objectid == skip_objectid ||
4066 key.type != BTRFS_EXTENT_DATA_KEY)
4068 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4069 if (btrfs_file_extent_type(leaf, fi) ==
4070 BTRFS_FILE_EXTENT_INLINE)
4072 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
4074 if (!inode || inode->i_ino != key.objectid) {
4076 inode = btrfs_ilookup(target_root->fs_info->sb,
4077 key.objectid, target_root, 1);
4080 skip_objectid = key.objectid;
4083 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4085 lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
4086 key.offset + num_bytes - 1, GFP_NOFS);
4087 btrfs_drop_extent_cache(inode, key.offset,
4088 key.offset + num_bytes - 1, 1);
4089 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
4090 key.offset + num_bytes - 1, GFP_NOFS);
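/*
 * for a leaf that references data inside the block group being
 * relocated, point each file extent item at its new location, update
 * the cached leaf ref and adjust the extent reference counts.
 */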
4097 static int noinline replace_extents_in_leaf(struct btrfs_trans_handle *trans,
4098 struct btrfs_root *root,
4099 struct extent_buffer *leaf,
4100 struct btrfs_block_group_cache *group,
4101 struct inode *reloc_inode)
4103 struct btrfs_key key;
4104 struct btrfs_key extent_key;
4105 struct btrfs_file_extent_item *fi;
4106 struct btrfs_leaf_ref *ref;
4107 struct disk_extent *new_extent;
4116 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
4117 BUG_ON(!new_extent);
4119 ref = btrfs_lookup_leaf_ref(root, leaf->start);
4123 nritems = btrfs_header_nritems(leaf);
4124 for (i = 0; i < nritems; i++) {
4125 btrfs_item_key_to_cpu(leaf, &key, i);
4126 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
4128 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4129 if (btrfs_file_extent_type(leaf, fi) ==
4130 BTRFS_FILE_EXTENT_INLINE)
4132 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
4133 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
4138 if (bytenr >= group->key.objectid + group->key.offset ||
4139 bytenr + num_bytes <= group->key.objectid)
4142 extent_key.objectid = bytenr;
4143 extent_key.offset = num_bytes;
4144 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
4146 ret = get_new_locations(reloc_inode, &extent_key,
4147 group->key.objectid, 1,
4148 &new_extent, &nr_extent);
4153 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
4154 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
4155 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
4156 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
4158 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4159 new_extent->disk_bytenr);
4160 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4161 new_extent->disk_num_bytes);
4162 btrfs_mark_buffer_dirty(leaf);
4164 ret = btrfs_inc_extent_ref(trans, root,
4165 new_extent->disk_bytenr,
4166 new_extent->disk_num_bytes,
4168 root->root_key.objectid,
4169 trans->transid, key.objectid);
4171 ret = btrfs_free_extent(trans, root,
4172 bytenr, num_bytes, leaf->start,
4173 btrfs_header_owner(leaf),
4174 btrfs_header_generation(leaf),
4180 BUG_ON(ext_index + 1 != ref->nritems);
4181 btrfs_free_leaf_ref(root, ref);
4185 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
4186 struct btrfs_root *root)
4188 struct btrfs_root *reloc_root;
4191 if (root->reloc_root) {
4192 reloc_root = root->reloc_root;
4193 root->reloc_root = NULL;
4194 list_add(&reloc_root->dead_list,
4195 &root->fs_info->dead_reloc_roots);
4197 btrfs_set_root_bytenr(&reloc_root->root_item,
4198 reloc_root->node->start);
4199 btrfs_set_root_level(&reloc_root->root_item,
4200 btrfs_header_level(reloc_root->node));
4201 memset(&reloc_root->root_item.drop_progress, 0,
4202 sizeof(struct btrfs_disk_key));
4203 reloc_root->root_item.drop_level = 0;
4205 ret = btrfs_update_root(trans, root->fs_info->tree_root,
4206 &reloc_root->root_key,
4207 &reloc_root->root_item);
4213 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
4215 struct btrfs_trans_handle *trans;
4216 struct btrfs_root *reloc_root;
4217 struct btrfs_root *prev_root = NULL;
4218 struct list_head dead_roots;
4222 INIT_LIST_HEAD(&dead_roots);
4223 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
4225 while (!list_empty(&dead_roots)) {
4226 reloc_root = list_entry(dead_roots.prev,
4227 struct btrfs_root, dead_list);
4228 list_del_init(&reloc_root->dead_list);
4230 BUG_ON(reloc_root->commit_root != NULL);
4232 trans = btrfs_join_transaction(root, 1);
4235 mutex_lock(&root->fs_info->drop_mutex);
4236 ret = btrfs_drop_snapshot(trans, reloc_root);
4239 mutex_unlock(&root->fs_info->drop_mutex);
4241 nr = trans->blocks_used;
4242 ret = btrfs_end_transaction(trans, root);
4244 btrfs_btree_balance_dirty(root, nr);
4247 free_extent_buffer(reloc_root->node);
4249 ret = btrfs_del_root(trans, root->fs_info->tree_root,
4250 &reloc_root->root_key);
4252 mutex_unlock(&root->fs_info->drop_mutex);
4254 nr = trans->blocks_used;
4255 ret = btrfs_end_transaction(trans, root);
4257 btrfs_btree_balance_dirty(root, nr);
4260 prev_root = reloc_root;
4263 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
4269 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
4271 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
4275 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
4277 struct btrfs_root *reloc_root;
4278 struct btrfs_trans_handle *trans;
4279 struct btrfs_key location;
4283 mutex_lock(&root->fs_info->tree_reloc_mutex);
4284 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
4286 found = !list_empty(&root->fs_info->dead_reloc_roots);
4287 mutex_unlock(&root->fs_info->tree_reloc_mutex);
4290 trans = btrfs_start_transaction(root, 1);
4292 ret = btrfs_commit_transaction(trans, root);
4296 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
4297 location.offset = (u64)-1;
4298 location.type = BTRFS_ROOT_ITEM_KEY;
4300 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
4301 BUG_ON(!reloc_root);
4302 btrfs_orphan_cleanup(reloc_root);
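/*
 * create the reloc tree for a subvol if it doesn't exist yet: copy the
 * committed root, insert a root item for the copy and read it back as
 * root->reloc_root.
 */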
4306 static int noinline init_reloc_tree(struct btrfs_trans_handle *trans,
4307 struct btrfs_root *root)
4309 struct btrfs_root *reloc_root;
4310 struct extent_buffer *eb;
4311 struct btrfs_root_item *root_item;
4312 struct btrfs_key root_key;
4315 BUG_ON(!root->ref_cows);
4316 if (root->reloc_root)
4319 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
4322 ret = btrfs_copy_root(trans, root, root->commit_root,
4323 &eb, BTRFS_TREE_RELOC_OBJECTID);
4326 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4327 root_key.offset = root->root_key.objectid;
4328 root_key.type = BTRFS_ROOT_ITEM_KEY;
4330 memcpy(root_item, &root->root_item, sizeof(*root_item));
4331 btrfs_set_root_refs(root_item, 0);
4332 btrfs_set_root_bytenr(root_item, eb->start);
4333 btrfs_set_root_level(root_item, btrfs_header_level(eb));
4334 btrfs_set_root_generation(root_item, trans->transid);
4336 btrfs_tree_unlock(eb);
4337 free_extent_buffer(eb);
4339 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
4340 &root_key, root_item);
4344 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
4346 BUG_ON(!reloc_root);
4347 reloc_root->last_trans = trans->transid;
4348 reloc_root->commit_root = NULL;
4349 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
4351 root->reloc_root = reloc_root;
4356 * Core function of space balance.
4358 * The idea is to use reloc trees to relocate tree blocks in reference
4359 * counted roots. There is one reloc tree for each subvol, and all
4360 * reloc trees share the same root key objectid. Reloc trees are
4361 * snapshots of the latest committed roots of subvols (root->commit_root).
4363 * To relocate a tree block referenced by a subvol, there are two steps:
4364 * COW the block through the subvol's reloc tree, then update the block
4365 * pointer in the subvol to point to the new block. Since all reloc trees
4366 * share the same root key objectid, special handling for tree blocks
4367 * owned by them is easy. Once a tree block has been COWed in one reloc
4368 * tree, we can use the resulting new block directly when the same block
4369 * is required to be COWed again through other reloc trees. In this way,
4370 * relocated tree blocks are shared between reloc trees and between subvols.
4373 static int noinline relocate_one_path(struct btrfs_trans_handle *trans,
4374 struct btrfs_root *root,
4375 struct btrfs_path *path,
4376 struct btrfs_key *first_key,
4377 struct btrfs_ref_path *ref_path,
4378 struct btrfs_block_group_cache *group,
4379 struct inode *reloc_inode)
4381 struct btrfs_root *reloc_root;
4382 struct extent_buffer *eb = NULL;
4383 struct btrfs_key *keys;
4387 int lowest_level = 0;
4390 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
4391 lowest_level = ref_path->owner_objectid;
4393 if (!root->ref_cows) {
4394 path->lowest_level = lowest_level;
4395 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
4397 path->lowest_level = 0;
4398 btrfs_release_path(root, path);
4402 mutex_lock(&root->fs_info->tree_reloc_mutex);
4403 ret = init_reloc_tree(trans, root);
4405 reloc_root = root->reloc_root;
4407 shared_level = ref_path->shared_level;
4408 ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
4410 keys = ref_path->node_keys;
4411 nodes = ref_path->new_nodes;
4412 memset(&keys[shared_level + 1], 0,
4413 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
4414 memset(&nodes[shared_level + 1], 0,
4415 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
4417 if (nodes[lowest_level] == 0) {
4418 path->lowest_level = lowest_level;
4419 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
4422 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
4423 eb = path->nodes[level];
4424 if (!eb || eb == reloc_root->node)
4426 nodes[level] = eb->start;
4428 btrfs_item_key_to_cpu(eb, &keys[level], 0);
4430 btrfs_node_key_to_cpu(eb, &keys[level], 0);
4432 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
4433 eb = path->nodes[0];
4434 ret = replace_extents_in_leaf(trans, reloc_root, eb,
4435 group, reloc_inode);
4438 btrfs_release_path(reloc_root, path);
4440 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
4446 * replace tree blocks in the fs tree with tree blocks in the reloc tree.
4449 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
4452 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
4453 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
4456 extent_buffer_get(path->nodes[0]);
4457 eb = path->nodes[0];
4458 btrfs_release_path(reloc_root, path);
4459 ret = invalidate_extent_cache(reloc_root, eb, group, root);
4461 free_extent_buffer(eb);
4464 mutex_unlock(&root->fs_info->tree_reloc_mutex);
4465 path->lowest_level = 0;
4469 static int noinline relocate_tree_block(struct btrfs_trans_handle *trans,
4470 struct btrfs_root *root,
4471 struct btrfs_path *path,
4472 struct btrfs_key *first_key,
4473 struct btrfs_ref_path *ref_path)
4477 ret = relocate_one_path(trans, root, path, first_key,
4478 ref_path, NULL, NULL);
4481 if (root == root->fs_info->extent_root)
4482 btrfs_extent_post_op(trans, root);
4487 static int noinline del_extent_zero(struct btrfs_trans_handle *trans,
4488 struct btrfs_root *extent_root,
4489 struct btrfs_path *path,
4490 struct btrfs_key *extent_key)
4494 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
4497 ret = btrfs_del_item(trans, extent_root, path);
4499 btrfs_release_path(extent_root, path);
4503 static struct btrfs_root noinline *read_ref_root(struct btrfs_fs_info *fs_info,
4504 struct btrfs_ref_path *ref_path)
4506 struct btrfs_key root_key;
4508 root_key.objectid = ref_path->root_objectid;
4509 root_key.type = BTRFS_ROOT_ITEM_KEY;
4510 if (is_cowonly_root(ref_path->root_objectid))
4511 root_key.offset = 0;
4513 root_key.offset = (u64)-1;
4515 return btrfs_read_fs_root_no_name(fs_info, &root_key);
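/*
 * relocate all references to a single extent: walk every backref chain
 * with btrfs_first_ref_path/btrfs_next_ref_path, copying data extents
 * into the relocation inode and rewriting the tree blocks that
 * reference them.
 */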
4518 static int noinline relocate_one_extent(struct btrfs_root *extent_root,
4519 struct btrfs_path *path,
4520 struct btrfs_key *extent_key,
4521 struct btrfs_block_group_cache *group,
4522 struct inode *reloc_inode, int pass)
4524 struct btrfs_trans_handle *trans;
4525 struct btrfs_root *found_root;
4526 struct btrfs_ref_path *ref_path = NULL;
4527 struct disk_extent *new_extents = NULL;
4532 struct btrfs_key first_key;
4536 trans = btrfs_start_transaction(extent_root, 1);
4539 if (extent_key->objectid == 0) {
4540 ret = del_extent_zero(trans, extent_root, path, extent_key);
4544 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
4550 for (loops = 0; ; loops++) {
4552 ret = btrfs_first_ref_path(trans, extent_root, ref_path,
4553 extent_key->objectid);
4555 ret = btrfs_next_ref_path(trans, extent_root, ref_path);
4562 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
4563 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
4566 found_root = read_ref_root(extent_root->fs_info, ref_path);
4567 BUG_ON(!found_root);
4569 * for reference counted trees, only process reference paths
4570 * rooted at the latest committed root.
4572 if (found_root->ref_cows &&
4573 ref_path->root_generation != found_root->root_key.offset)
4576 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
4579 * copy data extents to new locations
4581 u64 group_start = group->key.objectid;
4582 ret = relocate_data_extent(reloc_inode,
4591 level = ref_path->owner_objectid;
4594 if (prev_block != ref_path->nodes[level]) {
4595 struct extent_buffer *eb;
4596 u64 block_start = ref_path->nodes[level];
4597 u64 block_size = btrfs_level_size(found_root, level);
4599 eb = read_tree_block(found_root, block_start,
4601 btrfs_tree_lock(eb);
4602 BUG_ON(level != btrfs_header_level(eb));
4605 btrfs_item_key_to_cpu(eb, &first_key, 0);
4607 btrfs_node_key_to_cpu(eb, &first_key, 0);
4609 btrfs_tree_unlock(eb);
4610 free_extent_buffer(eb);
4611 prev_block = block_start;
4614 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
4617 * use fallback method to process the remaining
4621 u64 group_start = group->key.objectid;
4622 new_extents = kmalloc(sizeof(*new_extents),
4625 ret = get_new_locations(reloc_inode,
4633 btrfs_record_root_in_trans(found_root);
4634 ret = replace_one_extent(trans, found_root,
4636 &first_key, ref_path,
4637 new_extents, nr_extents);
4643 btrfs_record_root_in_trans(found_root);
4644 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4645 ret = relocate_tree_block(trans, found_root, path,
4646 &first_key, ref_path);
4649 * try to update data extent references while
4650 * keeping metadata shared between snapshots.
4652 ret = relocate_one_path(trans, found_root, path,
4653 &first_key, ref_path,
4654 group, reloc_inode);
4661 btrfs_end_transaction(trans, extent_root);
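/*
 * pick the raid profile a block group should be rewritten with, based
 * on the number of devices currently available.
 */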
4667 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
4670 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
4671 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
4673 num_devices = root->fs_info->fs_devices->num_devices;
4674 if (num_devices == 1) {
4675 stripped |= BTRFS_BLOCK_GROUP_DUP;
4676 stripped = flags & ~stripped;
4678 /* turn raid0 into single device chunks */
4679 if (flags & BTRFS_BLOCK_GROUP_RAID0)
4682 /* turn mirroring into duplication */
4683 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
4684 BTRFS_BLOCK_GROUP_RAID10))
4685 return stripped | BTRFS_BLOCK_GROUP_DUP;
4688 /* they already had raid on here, just return */
4689 if (flags & stripped)
4692 stripped |= BTRFS_BLOCK_GROUP_DUP;
4693 stripped = flags & ~stripped;
4695 /* switch duplicated blocks with raid1 */
4696 if (flags & BTRFS_BLOCK_GROUP_DUP)
4697 return stripped | BTRFS_BLOCK_GROUP_RAID1;
4699 /* turn single device chunks into raid0 */
4700 return stripped | BTRFS_BLOCK_GROUP_RAID0;
4705 int __alloc_chunk_for_shrink(struct btrfs_root *root,
4706 struct btrfs_block_group_cache *shrink_block_group,
4709 struct btrfs_trans_handle *trans;
4710 u64 new_alloc_flags;
4713 spin_lock(&shrink_block_group->lock);
4714 if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
4715 spin_unlock(&shrink_block_group->lock);
4717 trans = btrfs_start_transaction(root, 1);
4718 spin_lock(&shrink_block_group->lock);
4720 new_alloc_flags = update_block_group_flags(root,
4721 shrink_block_group->flags);
4722 if (new_alloc_flags != shrink_block_group->flags) {
4724 btrfs_block_group_used(&shrink_block_group->item);
4726 calc = shrink_block_group->key.offset;
4728 spin_unlock(&shrink_block_group->lock);
4730 do_chunk_alloc(trans, root->fs_info->extent_root,
4731 calc + 2 * 1024 * 1024, new_alloc_flags, force);
4733 btrfs_end_transaction(trans, root);
4735 spin_unlock(&shrink_block_group->lock);
4739 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
4740 struct btrfs_root *root,
4741 u64 objectid, u64 size)
4743 struct btrfs_path *path;
4744 struct btrfs_inode_item *item;
4745 struct extent_buffer *leaf;
4748 path = btrfs_alloc_path();
4752 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
4756 leaf = path->nodes[0];
4757 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
4758 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
4759 btrfs_set_inode_generation(leaf, item, 1);
4760 btrfs_set_inode_size(leaf, item, size);
4761 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
4762 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NODATASUM |
4763 BTRFS_INODE_NOCOMPRESS);
4764 btrfs_mark_buffer_dirty(leaf);
4765 btrfs_release_path(root, path);
4767 btrfs_free_path(path);
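/*
 * create the staging inode in the data reloc tree that relocated file
 * data is copied into; it is inserted as an orphan so it gets cleaned
 * up if we crash before relocation finishes.
 */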
4771 static struct inode noinline *create_reloc_inode(struct btrfs_fs_info *fs_info,
4772 struct btrfs_block_group_cache *group)
4774 struct inode *inode = NULL;
4775 struct btrfs_trans_handle *trans;
4776 struct btrfs_root *root;
4777 struct btrfs_key root_key;
4778 u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
4781 root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
4782 root_key.type = BTRFS_ROOT_ITEM_KEY;
4783 root_key.offset = (u64)-1;
4784 root = btrfs_read_fs_root_no_name(fs_info, &root_key);
4786 return ERR_CAST(root);
4788 trans = btrfs_start_transaction(root, 1);
4791 err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
4795 err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
4798 err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
4799 group->key.offset, 0, group->key.offset,
4803 inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
4804 if (inode->i_state & I_NEW) {
4805 BTRFS_I(inode)->root = root;
4806 BTRFS_I(inode)->location.objectid = objectid;
4807 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
4808 BTRFS_I(inode)->location.offset = 0;
4809 btrfs_read_locked_inode(inode);
4810 unlock_new_inode(inode);
4811 BUG_ON(is_bad_inode(inode));
4816 err = btrfs_orphan_add(trans, inode);
4818 btrfs_end_transaction(trans, root);
4822 inode = ERR_PTR(err);
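/*
 * relocate every extent in the given block group: mark the group read
 * only, then repeatedly scan the extent tree for extents inside it and
 * move them with relocate_one_extent until none remain.
 */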
4827 int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
4829 struct btrfs_trans_handle *trans;
4830 struct btrfs_path *path;
4831 struct btrfs_fs_info *info = root->fs_info;
4832 struct extent_buffer *leaf;
4833 struct inode *reloc_inode;
4834 struct btrfs_block_group_cache *block_group;
4835 struct btrfs_key key;
4844 root = root->fs_info->extent_root;
4846 block_group = btrfs_lookup_block_group(info, group_start);
4847 BUG_ON(!block_group);
4849 printk("btrfs relocating block group %llu flags %llu\n",
4850 (unsigned long long)block_group->key.objectid,
4851 (unsigned long long)block_group->flags);
4853 path = btrfs_alloc_path();
4856 reloc_inode = create_reloc_inode(info, block_group);
4857 BUG_ON(IS_ERR(reloc_inode));
4859 __alloc_chunk_for_shrink(root, block_group, 1);
4860 block_group->ro = 1;
4861 block_group->space_info->total_bytes -= block_group->key.offset;
4863 btrfs_start_delalloc_inodes(info->tree_root);
4864 btrfs_wait_ordered_extents(info->tree_root, 0);
4869 key.objectid = block_group->key.objectid;
4872 cur_byte = key.objectid;
4874 trans = btrfs_start_transaction(info->tree_root, 1);
4875 btrfs_commit_transaction(trans, info->tree_root);
4877 mutex_lock(&root->fs_info->cleaner_mutex);
4878 btrfs_clean_old_snapshots(info->tree_root);
4879 btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
4880 mutex_unlock(&root->fs_info->cleaner_mutex);
4883 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4887 leaf = path->nodes[0];
4888 nritems = btrfs_header_nritems(leaf);
4889 if (path->slots[0] >= nritems) {
4890 ret = btrfs_next_leaf(root, path);
4897 leaf = path->nodes[0];
4898 nritems = btrfs_header_nritems(leaf);
4901 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4903 if (key.objectid >= block_group->key.objectid +
4904 block_group->key.offset)
4907 if (progress && need_resched()) {
4908 btrfs_release_path(root, path);
4915 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
4916 key.objectid + key.offset <= cur_byte) {
4922 cur_byte = key.objectid + key.offset;
4923 btrfs_release_path(root, path);
4925 __alloc_chunk_for_shrink(root, block_group, 0);
4926 ret = relocate_one_extent(root, path, &key, block_group,
4932 key.objectid = cur_byte;
4937 btrfs_release_path(root, path);
4940 btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
4941 invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
4942 WARN_ON(reloc_inode->i_mapping->nrpages);
4945 if (total_found > 0) {
4946 printk("btrfs found %llu extents in pass %d\n",
4947 (unsigned long long)total_found, pass);
4949 if (total_found == skipped && pass > 2) {
4951 reloc_inode = create_reloc_inode(info, block_group);
4957 /* delete reloc_inode */
4960 /* unpin extents in this range */
4961 trans = btrfs_start_transaction(info->tree_root, 1);
4962 btrfs_commit_transaction(trans, info->tree_root);
4964 spin_lock(&block_group->lock);
4965 WARN_ON(block_group->pinned > 0);
4966 WARN_ON(block_group->reserved > 0);
4967 WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
4968 spin_unlock(&block_group->lock);
4971 btrfs_free_path(path);
4975 int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
4976 struct btrfs_key *key)
4979 struct btrfs_key found_key;
4980 struct extent_buffer *leaf;
4983 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
4988 slot = path->slots[0];
4989 leaf = path->nodes[0];
4990 if (slot >= btrfs_header_nritems(leaf)) {
4991 ret = btrfs_next_leaf(root, path);
4998 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5000 if (found_key.objectid >= key->objectid &&
5001 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
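/*
 * tear down all in-memory block group caches, removing each one from the
 * rb tree and from its space_info list
 */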
5012 int btrfs_free_block_groups(struct btrfs_fs_info *info)
5014 struct btrfs_block_group_cache *block_group;
5017 spin_lock(&info->block_group_cache_lock);
5018 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
5019 block_group = rb_entry(n, struct btrfs_block_group_cache,
5021 rb_erase(&block_group->cache_node,
5022 &info->block_group_cache_tree);
5023 spin_unlock(&info->block_group_cache_lock);
5025 btrfs_remove_free_space_cache(block_group);
5026 down_write(&block_group->space_info->groups_sem);
5027 list_del(&block_group->list);
5028 up_write(&block_group->space_info->groups_sem);
5031 spin_lock(&info->block_group_cache_lock);
5033 spin_unlock(&info->block_group_cache_lock);
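/*
 * read every block group item out of the extent tree and build the
 * in-memory cache for each one
 */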
5037 int btrfs_read_block_groups(struct btrfs_root *root)
5039 struct btrfs_path *path;
5041 struct btrfs_block_group_cache *cache;
5042 struct btrfs_fs_info *info = root->fs_info;
5043 struct btrfs_space_info *space_info;
5044 struct btrfs_key key;
5045 struct btrfs_key found_key;
5046 struct extent_buffer *leaf;
5048 root = info->extent_root;
5051 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
5052 path = btrfs_alloc_path();
5057 ret = find_first_block_group(root, path, &key);
5065 leaf = path->nodes[0];
5066 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5067 cache = kzalloc(sizeof(*cache), GFP_NOFS);
5073 spin_lock_init(&cache->lock);
5074 mutex_init(&cache->alloc_mutex);
5075 INIT_LIST_HEAD(&cache->list);
5076 read_extent_buffer(leaf, &cache->item,
5077 btrfs_item_ptr_offset(leaf, path->slots[0]),
5078 sizeof(cache->item));
5079 memcpy(&cache->key, &found_key, sizeof(found_key));
5081 key.objectid = found_key.objectid + found_key.offset;
5082 btrfs_release_path(root, path);
5083 cache->flags = btrfs_block_group_flags(&cache->item);
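/* find (or create) the space_info for these flags and add the new cache to it */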
5085 ret = update_space_info(info, cache->flags, found_key.offset,
5086 btrfs_block_group_used(&cache->item),
5089 cache->space_info = space_info;
5090 down_write(&space_info->groups_sem);
5091 list_add_tail(&cache->list, &space_info->block_groups);
5092 up_write(&space_info->groups_sem);
5094 ret = btrfs_add_block_group_cache(root->fs_info, cache);
5097 set_avail_alloc_bits(root->fs_info, cache->flags);
5101 btrfs_free_path(path);
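/*
 * create a new block group for a freshly allocated chunk and insert the
 * corresponding item into the extent tree
 */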
5105 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
5106 struct btrfs_root *root, u64 bytes_used,
5107 u64 type, u64 chunk_objectid, u64 chunk_offset,
5111 struct btrfs_root *extent_root;
5112 struct btrfs_block_group_cache *cache;
5114 extent_root = root->fs_info->extent_root;
5116 root->fs_info->last_trans_new_blockgroup = trans->transid;
5118 cache = kzalloc(sizeof(*cache), GFP_NOFS);
5122 cache->key.objectid = chunk_offset;
5123 cache->key.offset = size;
5124 spin_lock_init(&cache->lock);
5125 mutex_init(&cache->alloc_mutex);
5126 INIT_LIST_HEAD(&cache->list);
5127 btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
5129 btrfs_set_block_group_used(&cache->item, bytes_used);
5130 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
5131 cache->flags = type;
5132 btrfs_set_block_group_flags(&cache->item, type);
5134 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
5135 &cache->space_info);
5137 down_write(&cache->space_info->groups_sem);
5138 list_add_tail(&cache->list, &cache->space_info->block_groups);
5139 up_write(&cache->space_info->groups_sem);
5141 ret = btrfs_add_block_group_cache(root->fs_info, cache);
5144 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
5145 sizeof(cache->item));
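/* run any pending extent tree updates the insert may have queued */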
5148 finish_current_insert(trans, extent_root, 0);
5149 ret = del_pending_extents(trans, extent_root, 0);
5151 set_avail_alloc_bits(extent_root->fs_info, type);
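/*
 * remove a block group: unlink it from the in-memory caches and delete
 * its item from the extent tree
 */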
5156 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
5157 struct btrfs_root *root, u64 group_start)
5159 struct btrfs_path *path;
5160 struct btrfs_block_group_cache *block_group;
5161 struct btrfs_key key;
5164 root = root->fs_info->extent_root;
5166 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
5167 BUG_ON(!block_group);
5169 memcpy(&key, &block_group->key, sizeof(key));
5171 path = btrfs_alloc_path();
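/* drop the free space cache and unlink the group from the in-memory structures */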
5174 btrfs_remove_free_space_cache(block_group);
5175 rb_erase(&block_group->cache_node,
5176 &root->fs_info->block_group_cache_tree);
5177 down_write(&block_group->space_info->groups_sem);
5178 list_del(&block_group->list);
5179 up_write(&block_group->space_info->groups_sem);
5182 memset(block_group, 0, sizeof(*block_group));
5183 kfree(block_group);
5186 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
5192 ret = btrfs_del_item(trans, root, path);
5194 btrfs_free_path(path);