2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
24 #include "transaction.h"
25 #include "print-tree.h"
28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_path *path, int level);
30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
31 *root, struct btrfs_key *ins_key,
32 struct btrfs_path *path, int data_size, int extend);
33 static int push_node_left(struct btrfs_trans_handle *trans,
34 struct btrfs_root *root, struct extent_buffer *dst,
35 struct extent_buffer *src, int empty);
36 static int balance_node_right(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
40 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
41 int level, int slot);
42 static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
43 struct extent_buffer *eb);
45 struct btrfs_path *btrfs_alloc_path(void)
47 struct btrfs_path *path;
48 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
53 * set all locked nodes in the path to blocking locks. This should
54 * be done before scheduling
56 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
59 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
60 if (!p->nodes[i] || !p->locks[i])
62 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
63 if (p->locks[i] == BTRFS_READ_LOCK)
64 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
65 else if (p->locks[i] == BTRFS_WRITE_LOCK)
66 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
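/*
 * Illustrative sketch (not part of the original file): the lock states
 * above come in spinning/blocking pairs. A hypothetical helper like this
 * captures the transition applied to every locked node in the path:
 */
static inline int blocking_lock_state_sketch(int rw)
{
	if (rw == BTRFS_READ_LOCK)
		return BTRFS_READ_LOCK_BLOCKING;
	if (rw == BTRFS_WRITE_LOCK)
		return BTRFS_WRITE_LOCK_BLOCKING;
	return rw;	/* already blocking, or not locked */
}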
71 * reset all the locked nodes in the path to spinning locks.
73 * held is used to keep lockdep happy: when lockdep is enabled, we
74 * set held to a blocking lock before we go around and retake all
75 * the spinlocks in the path. You can safely pass NULL here.
78 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
79 struct extent_buffer *held, int held_rw)
83 #ifdef CONFIG_DEBUG_LOCK_ALLOC
84 /* lockdep really cares that we take all of these spinlocks
85 * in the right order. If any of the locks in the path are not
86 * currently blocking, it is going to complain. So, make really
87 * really sure by forcing the path to blocking before we clear
88 * the path blocking.
91 btrfs_set_lock_blocking_rw(held, held_rw);
92 if (held_rw == BTRFS_WRITE_LOCK)
93 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
94 else if (held_rw == BTRFS_READ_LOCK)
95 held_rw = BTRFS_READ_LOCK_BLOCKING;
97 btrfs_set_path_blocking(p);
100 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
101 if (p->nodes[i] && p->locks[i]) {
102 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
103 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
104 p->locks[i] = BTRFS_WRITE_LOCK;
105 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
106 p->locks[i] = BTRFS_READ_LOCK;
110 #ifdef CONFIG_DEBUG_LOCK_ALLOC
112 btrfs_clear_lock_blocking_rw(held, held_rw);
116 /* this also releases the path */
117 void btrfs_free_path(struct btrfs_path *p)
121 btrfs_release_path(p);
122 kmem_cache_free(btrfs_path_cachep, p);
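/*
 * Usage sketch (illustrative, not from this file): the common caller
 * pattern for paths; the helper name is hypothetical and error
 * handling is elided.
 */
static inline int path_usage_sketch(struct btrfs_root *root,
				    struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	/* ... inspect path->nodes[0] / path->slots[0] here ... */
	btrfs_free_path(path);	/* drops locks and refs, then frees */
	return ret;
}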
126 * path release drops references on the extent buffers in the path
127 * and it drops any locks held by this path
129 * It is safe to call this on paths where no locks or extent buffers are held.
131 noinline void btrfs_release_path(struct btrfs_path *p)
135 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
140 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
143 free_extent_buffer(p->nodes[i]);
149 * safely gets a reference on the root node of a tree. A lock
150 * is not taken, so a concurrent writer may put a different node
151 * at the root of the tree. See btrfs_lock_root_node for the
152 * looping required.
154 * The extent buffer returned by this has a reference taken, so
155 * it won't disappear. It may stop being the root of the tree
156 * at any time because there are no locks held.
158 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
160 struct extent_buffer *eb;
164 eb = rcu_dereference(root->node);
167 * RCU really hurts here; we could free up the root node because
168 * it was COWed but we may not get the new root node yet, so do
169 * the inc_not_zero dance, and if it doesn't work then
170 * synchronize_rcu and try again.
172 if (atomic_inc_not_zero(&eb->refs)) {
182 /* loop around taking references on and locking the root node of the
183 * tree until you end up with a lock on the root. A locked buffer
184 * is returned, with a reference held.
186 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
188 struct extent_buffer *eb;
191 eb = btrfs_root_node(root);
193 if (eb == root->node)
195 btrfs_tree_unlock(eb);
196 free_extent_buffer(eb);
201 /* loop around taking references on and locking the root node of the
202 * tree until you end up with a lock on the root. A locked buffer
203 * is returned, with a reference held.
205 static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
207 struct extent_buffer *eb;
210 eb = btrfs_root_node(root);
211 btrfs_tree_read_lock(eb);
212 if (eb == root->node)
214 btrfs_tree_read_unlock(eb);
215 free_extent_buffer(eb);
220 /* cowonly roots (everything not a reference counted cow subvolume) just get
221 * put onto a simple dirty list. transaction.c walks this list to make sure they
222 * get properly updated on disk.
224 static void add_root_to_dirty_list(struct btrfs_root *root)
226 spin_lock(&root->fs_info->trans_lock);
227 if (root->track_dirty && list_empty(&root->dirty_list)) {
228 list_add(&root->dirty_list,
229 &root->fs_info->dirty_cowonly_roots);
231 spin_unlock(&root->fs_info->trans_lock);
235 * used by snapshot creation to make a copy of a root for a tree with
236 * a given objectid. The buffer with the new root node is returned in
237 * cow_ret, and this func returns zero on success or a negative error code.
239 int btrfs_copy_root(struct btrfs_trans_handle *trans,
240 struct btrfs_root *root,
241 struct extent_buffer *buf,
242 struct extent_buffer **cow_ret, u64 new_root_objectid)
244 struct extent_buffer *cow;
247 struct btrfs_disk_key disk_key;
249 WARN_ON(root->ref_cows && trans->transid !=
250 root->fs_info->running_transaction->transid);
251 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
253 level = btrfs_header_level(buf);
255 btrfs_item_key(buf, &disk_key, 0);
257 btrfs_node_key(buf, &disk_key, 0);
259 cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
260 new_root_objectid, &disk_key, level,
265 copy_extent_buffer(cow, buf, 0, 0, cow->len);
266 btrfs_set_header_bytenr(cow, cow->start);
267 btrfs_set_header_generation(cow, trans->transid);
268 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
269 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
270 BTRFS_HEADER_FLAG_RELOC);
271 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
272 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
274 btrfs_set_header_owner(cow, new_root_objectid);
276 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
279 WARN_ON(btrfs_header_generation(buf) > trans->transid);
280 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
281 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
283 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
288 btrfs_mark_buffer_dirty(cow);
297 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
298 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
300 MOD_LOG_ROOT_REPLACE,
303 struct tree_mod_move {
308 struct tree_mod_root {
313 struct tree_mod_elem {
315 u64 index; /* shifted logical */
319 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
322 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
325 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
326 struct btrfs_disk_key key;
329 /* this is used for op == MOD_LOG_MOVE_KEYS */
330 struct tree_mod_move move;
332 /* this is used for op == MOD_LOG_ROOT_REPLACE */
333 struct tree_mod_root old_root;
336 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
338 read_lock(&fs_info->tree_mod_log_lock);
341 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
343 read_unlock(&fs_info->tree_mod_log_lock);
346 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
348 write_lock(&fs_info->tree_mod_log_lock);
351 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
353 write_unlock(&fs_info->tree_mod_log_lock);
357 * Increment the upper half of tree_mod_seq, setting the lower half to zero.
359 * Must be called with fs_info->tree_mod_seq_lock held.
361 static inline u64 btrfs_inc_tree_mod_seq_major(struct btrfs_fs_info *fs_info)
363 u64 seq = atomic64_read(&fs_info->tree_mod_seq);
364 seq &= 0xffffffff00000000ull;
365 seq += 1ull << 32;
366 atomic64_set(&fs_info->tree_mod_seq, seq);
371 * Increment the lower half of tree_mod_seq.
373 * Must be called with fs_info->tree_mod_seq_lock held. The way major numbers
374 * are generated should not technically require a spin lock here. (Rationale:
375 * incrementing the minor while an increment of the major seq number is between
376 * its atomic64_read and atomic64_set calls doesn't duplicate sequence numbers;
377 * it just returns a unique sequence number as usual.) We have decided to leave
378 * that requirement in here and rethink it once we notice it really imposes a
379 * problem on some workload.
381 static inline u64 btrfs_inc_tree_mod_seq_minor(struct btrfs_fs_info *fs_info)
383 return atomic64_inc_return(&fs_info->tree_mod_seq);
387 * return the last minor in the previous major tree_mod_seq number
389 u64 btrfs_tree_mod_seq_prev(u64 seq)
391 return (seq & 0xffffffff00000000ull) - 1ull;
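/*
 * Illustrative sketch (not part of the original file): the layout the
 * helpers above assume, with the major number in the upper 32 bits and
 * the minor in the lower 32. For example, for seq 0x0000000500000003,
 * btrfs_tree_mod_seq_prev() yields 0x00000004ffffffff, the last minor
 * of the previous major. The helper name is hypothetical.
 */
static inline u64 tree_mod_seq_pack_sketch(u32 major, u32 minor)
{
	return ((u64)major << 32) | minor;
}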
395 * This adds a new blocker to the tree mod log's blocker list if the @elem
396 * passed does not already have a sequence number set. So when a caller expects
397 * to record tree modifications, it should ensure to set elem->seq to zero
398 * before calling btrfs_get_tree_mod_seq.
399 * Returns a fresh, unused tree log modification sequence number, even if no new
400 * blocker was added.
402 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
403 struct seq_list *elem)
407 tree_mod_log_write_lock(fs_info);
408 spin_lock(&fs_info->tree_mod_seq_lock);
410 elem->seq = btrfs_inc_tree_mod_seq_major(fs_info);
411 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
413 seq = btrfs_inc_tree_mod_seq_minor(fs_info);
414 spin_unlock(&fs_info->tree_mod_seq_lock);
415 tree_mod_log_write_unlock(fs_info);
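/*
 * Usage sketch (illustrative, hypothetical function name): a blocker's
 * lifetime. While the elem is on the list, log entries newer than its
 * seq are kept and can be replayed.
 */
static inline u64 tree_mod_seq_usage_sketch(struct btrfs_fs_info *fs_info)
{
	struct seq_list elem = {};	/* seq must start out zero */
	u64 seq;

	seq = btrfs_get_tree_mod_seq(fs_info, &elem);
	/* ... inspect/replay tree state as of @seq ... */
	btrfs_put_tree_mod_seq(fs_info, &elem);
	return seq;
}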
420 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
421 struct seq_list *elem)
423 struct rb_root *tm_root;
424 struct rb_node *node;
425 struct rb_node *next;
426 struct seq_list *cur_elem;
427 struct tree_mod_elem *tm;
428 u64 min_seq = (u64)-1;
429 u64 seq_putting = elem->seq;
434 spin_lock(&fs_info->tree_mod_seq_lock);
435 list_del(&elem->list);
438 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
439 if (cur_elem->seq < min_seq) {
440 if (seq_putting > cur_elem->seq) {
442 * blocker with lower sequence number exists, we
443 * cannot remove anything from the log
445 spin_unlock(&fs_info->tree_mod_seq_lock);
448 min_seq = cur_elem->seq;
451 spin_unlock(&fs_info->tree_mod_seq_lock);
454 * anything that's lower than the lowest existing (read: blocked)
455 * sequence number can be removed from the tree.
457 tree_mod_log_write_lock(fs_info);
458 tm_root = &fs_info->tree_mod_log;
459 for (node = rb_first(tm_root); node; node = next) {
460 next = rb_next(node);
461 tm = container_of(node, struct tree_mod_elem, node);
462 if (tm->seq > min_seq)
464 rb_erase(node, tm_root);
467 tree_mod_log_write_unlock(fs_info);
471 * key order of the log:
472 *       index -> sequence
474 * the index is the shifted logical of the *new* root node for root replace
475 * operations, or the shifted logical of the affected block for all other
476 * operations.
479 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
481 struct rb_root *tm_root;
482 struct rb_node **new;
483 struct rb_node *parent = NULL;
484 struct tree_mod_elem *cur;
489 tree_mod_log_write_lock(fs_info);
490 if (list_empty(&fs_info->tree_mod_seq_list)) {
491 tree_mod_log_write_unlock(fs_info);
493 * Ok we no longer care about logging modifications, free up tm
494 * and return 0. Any callers shouldn't be using tm after
495 * calling tree_mod_log_insert, but if they do we can just
496 * change this to return a special error code to let the callers
497 * do their own thing.
503 spin_lock(&fs_info->tree_mod_seq_lock);
504 tm->seq = btrfs_inc_tree_mod_seq_minor(fs_info);
505 spin_unlock(&fs_info->tree_mod_seq_lock);
507 tm_root = &fs_info->tree_mod_log;
508 new = &tm_root->rb_node;
510 cur = container_of(*new, struct tree_mod_elem, node);
512 if (cur->index < tm->index)
513 new = &((*new)->rb_left);
514 else if (cur->index > tm->index)
515 new = &((*new)->rb_right);
516 else if (cur->seq < tm->seq)
517 new = &((*new)->rb_left);
518 else if (cur->seq > tm->seq)
519 new = &((*new)->rb_right);
527 rb_link_node(&tm->node, parent, new);
528 rb_insert_color(&tm->node, tm_root);
530 tree_mod_log_write_unlock(fs_info);
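/*
 * Illustrative sketch (not part of the original file): the ordering the
 * loop above implements. Entries are keyed by the (index, seq) pair;
 * note the loop descends left when the existing entry's key is the
 * smaller one, so larger keys end up in the left subtree. Hypothetical
 * comparator:
 */
static inline int tree_mod_elem_cmp_sketch(struct tree_mod_elem *a,
					   struct tree_mod_elem *b)
{
	if (a->index != b->index)
		return a->index < b->index ? -1 : 1;
	if (a->seq != b->seq)
		return a->seq < b->seq ? -1 : 1;
	return 0;
}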
535 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
536 * returns zero with the tree_mod_log_lock acquired. The caller must hold
537 * this until all tree mod log insertions are recorded in the rb tree and then
538 * call tree_mod_log_write_unlock() to release.
540 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
541 struct extent_buffer *eb) {
543 if (list_empty(&(fs_info)->tree_mod_seq_list))
545 if (eb && btrfs_header_level(eb) == 0)
551 __tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
552 struct extent_buffer *eb, int slot,
553 enum mod_log_op op, gfp_t flags)
555 struct tree_mod_elem *tm;
557 tm = kzalloc(sizeof(*tm), flags);
561 tm->index = eb->start >> PAGE_CACHE_SHIFT;
562 if (op != MOD_LOG_KEY_ADD) {
563 btrfs_node_key(eb, &tm->key, slot);
564 tm->blockptr = btrfs_node_blockptr(eb, slot);
568 tm->generation = btrfs_node_ptr_generation(eb, slot);
570 return __tree_mod_log_insert(fs_info, tm);
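/*
 * Illustrative sketch: the "shifted logical" used as tm->index above.
 * With 4KiB pages, a block whose logical start is 1GiB maps to index
 * 262144. The helper name is hypothetical.
 */
static inline u64 tree_mod_index_sketch(u64 logical)
{
	return logical >> PAGE_CACHE_SHIFT;
}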
574 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
575 struct extent_buffer *eb, int slot,
576 enum mod_log_op op, gfp_t flags)
578 if (tree_mod_dont_log(fs_info, eb))
581 return __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
585 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
586 struct extent_buffer *eb, int dst_slot, int src_slot,
587 int nr_items, gfp_t flags)
589 struct tree_mod_elem *tm;
593 if (tree_mod_dont_log(fs_info, eb))
597 * When we overwrite something during the move, we log these removals.
598 * This can only happen when we move towards the beginning of the
599 * buffer, i.e. dst_slot < src_slot.
601 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
602 ret = __tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
603 MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
607 tm = kzalloc(sizeof(*tm), flags);
611 tm->index = eb->start >> PAGE_CACHE_SHIFT;
613 tm->move.dst_slot = dst_slot;
614 tm->move.nr_items = nr_items;
615 tm->op = MOD_LOG_MOVE_KEYS;
617 return __tree_mod_log_insert(fs_info, tm);
621 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
627 if (btrfs_header_level(eb) == 0)
630 nritems = btrfs_header_nritems(eb);
631 for (i = nritems - 1; i >= 0; i--) {
632 ret = __tree_mod_log_insert_key(fs_info, eb, i,
633 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
639 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
640 struct extent_buffer *old_root,
641 struct extent_buffer *new_root, gfp_t flags,
644 struct tree_mod_elem *tm;
646 if (tree_mod_dont_log(fs_info, NULL))
650 __tree_mod_log_free_eb(fs_info, old_root);
652 tm = kzalloc(sizeof(*tm), flags);
656 tm->index = new_root->start >> PAGE_CACHE_SHIFT;
657 tm->old_root.logical = old_root->start;
658 tm->old_root.level = btrfs_header_level(old_root);
659 tm->generation = btrfs_header_generation(old_root);
660 tm->op = MOD_LOG_ROOT_REPLACE;
662 return __tree_mod_log_insert(fs_info, tm);
665 static struct tree_mod_elem *
666 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
669 struct rb_root *tm_root;
670 struct rb_node *node;
671 struct tree_mod_elem *cur = NULL;
672 struct tree_mod_elem *found = NULL;
673 u64 index = start >> PAGE_CACHE_SHIFT;
675 tree_mod_log_read_lock(fs_info);
676 tm_root = &fs_info->tree_mod_log;
677 node = tm_root->rb_node;
679 cur = container_of(node, struct tree_mod_elem, node);
680 if (cur->index < index) {
681 node = node->rb_left;
682 } else if (cur->index > index) {
683 node = node->rb_right;
684 } else if (cur->seq < min_seq) {
685 node = node->rb_left;
686 } else if (!smallest) {
687 /* we want the node with the highest seq */
689 BUG_ON(found->seq > cur->seq);
691 node = node->rb_left;
692 } else if (cur->seq > min_seq) {
693 /* we want the node with the smallest seq */
695 BUG_ON(found->seq < cur->seq);
697 node = node->rb_right;
703 tree_mod_log_read_unlock(fs_info);
709 * this returns the element from the log with the smallest time sequence
710 * value that's in the log (the oldest log item). any element with a time
711 * sequence lower than min_seq will be ignored.
713 static struct tree_mod_elem *
714 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
717 return __tree_mod_log_search(fs_info, start, min_seq, 1);
721 * this returns the element from the log with the largest time sequence
722 * value that's in the log (the most recent log item). any element with
723 * a time sequence lower than min_seq will be ignored.
725 static struct tree_mod_elem *
726 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
728 return __tree_mod_log_search(fs_info, start, min_seq, 0);
732 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
733 struct extent_buffer *src, unsigned long dst_offset,
734 unsigned long src_offset, int nr_items)
739 if (tree_mod_dont_log(fs_info, NULL))
742 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
745 for (i = 0; i < nr_items; i++) {
746 ret = __tree_mod_log_insert_key(fs_info, src,
748 MOD_LOG_KEY_REMOVE, GFP_NOFS);
750 ret = __tree_mod_log_insert_key(fs_info, dst,
759 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
760 int dst_offset, int src_offset, int nr_items)
763 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
769 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
770 struct extent_buffer *eb, int slot, int atomic)
774 ret = __tree_mod_log_insert_key(fs_info, eb, slot,
776 atomic ? GFP_ATOMIC : GFP_NOFS);
781 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
783 if (tree_mod_dont_log(fs_info, eb))
785 __tree_mod_log_free_eb(fs_info, eb);
789 tree_mod_log_set_root_pointer(struct btrfs_root *root,
790 struct extent_buffer *new_root_node,
794 ret = tree_mod_log_insert_root(root->fs_info, root->node,
795 new_root_node, GFP_NOFS, log_removal);
800 * check if the tree block can be shared by multiple trees
802 int btrfs_block_can_be_shared(struct btrfs_root *root,
803 struct extent_buffer *buf)
806 * Tree blocks not in reference counted trees and tree roots
807 * are never shared. If a block was allocated after the last
808 * snapshot and the block was not allocated by tree relocation,
809 * we know the block is not shared.
811 if (root->ref_cows &&
812 buf != root->node && buf != root->commit_root &&
813 (btrfs_header_generation(buf) <=
814 btrfs_root_last_snapshot(&root->root_item) ||
815 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
817 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
818 if (root->ref_cows &&
819 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
825 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
826 struct btrfs_root *root,
827 struct extent_buffer *buf,
828 struct extent_buffer *cow,
838 * Backrefs update rules:
840 * Always use full backrefs for extent pointers in tree block
841 * allocated by tree relocation.
843 * If a shared tree block is no longer referenced by its owner
844 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
845 * use full backrefs for extent pointers in tree block.
847 * If a tree block is being relocated
848 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
849 * use full backrefs for extent pointers in tree block.
850 * The reason for this is some operations (such as drop tree)
851 * are only allowed for blocks that use full backrefs.
854 if (btrfs_block_can_be_shared(root, buf)) {
855 ret = btrfs_lookup_extent_info(trans, root, buf->start,
856 btrfs_header_level(buf), 1,
862 btrfs_std_error(root->fs_info, ret);
867 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
868 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
869 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
874 owner = btrfs_header_owner(buf);
875 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
876 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
879 if ((owner == root->root_key.objectid ||
880 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
881 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
882 ret = btrfs_inc_ref(trans, root, buf, 1, 1);
883 BUG_ON(ret); /* -ENOMEM */
885 if (root->root_key.objectid ==
886 BTRFS_TREE_RELOC_OBJECTID) {
887 ret = btrfs_dec_ref(trans, root, buf, 0, 1);
888 BUG_ON(ret); /* -ENOMEM */
889 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
890 BUG_ON(ret); /* -ENOMEM */
892 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
895 if (root->root_key.objectid ==
896 BTRFS_TREE_RELOC_OBJECTID)
897 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
899 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
900 BUG_ON(ret); /* -ENOMEM */
902 if (new_flags != 0) {
903 int level = btrfs_header_level(buf);
905 ret = btrfs_set_disk_extent_flags(trans, root,
908 new_flags, level, 0);
913 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
914 if (root->root_key.objectid ==
915 BTRFS_TREE_RELOC_OBJECTID)
916 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
918 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
919 BUG_ON(ret); /* -ENOMEM */
920 ret = btrfs_dec_ref(trans, root, buf, 1, 1);
921 BUG_ON(ret); /* -ENOMEM */
923 clean_tree_block(trans, root, buf);
930 * does the dirty work in cow of a single block. The parent block (if
931 * supplied) is updated to point to the new cow copy. The new buffer is marked
932 * dirty and returned locked. If you modify the block it needs to be marked
933 * dirty again.
935 * search_start -- an allocation hint for the new block
937 * empty_size -- a hint that you plan on doing more cow. This is the size in
938 * bytes the allocator should try to find free next to the block it returns.
939 * This is just a hint and may be ignored by the allocator.
941 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
942 struct btrfs_root *root,
943 struct extent_buffer *buf,
944 struct extent_buffer *parent, int parent_slot,
945 struct extent_buffer **cow_ret,
946 u64 search_start, u64 empty_size)
948 struct btrfs_disk_key disk_key;
949 struct extent_buffer *cow;
958 btrfs_assert_tree_locked(buf);
960 WARN_ON(root->ref_cows && trans->transid !=
961 root->fs_info->running_transaction->transid);
962 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
964 level = btrfs_header_level(buf);
967 btrfs_item_key(buf, &disk_key, 0);
969 btrfs_node_key(buf, &disk_key, 0);
971 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
973 parent_start = parent->start;
979 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
980 root->root_key.objectid, &disk_key,
981 level, search_start, empty_size);
985 /* cow is set to blocking by btrfs_init_new_buffer */
987 copy_extent_buffer(cow, buf, 0, 0, cow->len);
988 btrfs_set_header_bytenr(cow, cow->start);
989 btrfs_set_header_generation(cow, trans->transid);
990 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
991 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
992 BTRFS_HEADER_FLAG_RELOC);
993 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
994 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
996 btrfs_set_header_owner(cow, root->root_key.objectid);
998 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
1001 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1003 btrfs_abort_transaction(trans, root, ret);
1007 if (root->ref_cows) {
1008 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1013 if (buf == root->node) {
1014 WARN_ON(parent && parent != buf);
1015 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1016 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1017 parent_start = buf->start;
1021 extent_buffer_get(cow);
1022 tree_mod_log_set_root_pointer(root, cow, 1);
1023 rcu_assign_pointer(root->node, cow);
1025 btrfs_free_tree_block(trans, root, buf, parent_start,
1027 free_extent_buffer(buf);
1028 add_root_to_dirty_list(root);
1030 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1031 parent_start = parent->start;
1035 WARN_ON(trans->transid != btrfs_header_generation(parent));
1036 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1037 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1038 btrfs_set_node_blockptr(parent, parent_slot,
1040 btrfs_set_node_ptr_generation(parent, parent_slot,
1042 btrfs_mark_buffer_dirty(parent);
1044 tree_mod_log_free_eb(root->fs_info, buf);
1045 btrfs_free_tree_block(trans, root, buf, parent_start,
1049 btrfs_tree_unlock(buf);
1050 free_extent_buffer_stale(buf);
1051 btrfs_mark_buffer_dirty(cow);
1057 * returns the logical address of the oldest predecessor of the given root.
1058 * entries older than time_seq are ignored.
1060 static struct tree_mod_elem *
1061 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1062 struct extent_buffer *eb_root, u64 time_seq)
1064 struct tree_mod_elem *tm;
1065 struct tree_mod_elem *found = NULL;
1066 u64 root_logical = eb_root->start;
1073 * the very last operation that's logged for a root is the replacement
1074 * operation (if it is replaced at all). this has the index of the *new*
1075 * root, making it the very first operation that's logged for this root.
1078 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1083 * if there are no tree operations for the oldest root, we simply
1084 * return it. this should only happen if that (old) root is at
1085 * level 0.
1091 * if there's an operation that's not a root replacement, we
1092 * found the oldest version of our root. normally, we'll find a
1093 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1095 if (tm->op != MOD_LOG_ROOT_REPLACE)
1099 root_logical = tm->old_root.logical;
1103 /* if there's no old root to return, return what we found instead */
1111 * tm is a pointer to the first operation to rewind within eb. then, all
1112 * previous operations will be rewound (until we reach something older than
1113 * time_seq).
1116 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1117 u64 time_seq, struct tree_mod_elem *first_tm)
1120 struct rb_node *next;
1121 struct tree_mod_elem *tm = first_tm;
1122 unsigned long o_dst;
1123 unsigned long o_src;
1124 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1126 n = btrfs_header_nritems(eb);
1127 tree_mod_log_read_lock(fs_info);
1128 while (tm && tm->seq >= time_seq) {
1130 * all the operations are recorded with the operator used for
1131 * the modification. as we're going backwards, we do the
1132 * opposite of each operation here.
1135 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1136 BUG_ON(tm->slot < n);
1138 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1139 case MOD_LOG_KEY_REMOVE:
1140 btrfs_set_node_key(eb, &tm->key, tm->slot);
1141 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1142 btrfs_set_node_ptr_generation(eb, tm->slot,
1146 case MOD_LOG_KEY_REPLACE:
1147 BUG_ON(tm->slot >= n);
1148 btrfs_set_node_key(eb, &tm->key, tm->slot);
1149 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1150 btrfs_set_node_ptr_generation(eb, tm->slot,
1153 case MOD_LOG_KEY_ADD:
1154 /* if a move operation is needed it's in the log */
1157 case MOD_LOG_MOVE_KEYS:
1158 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1159 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1160 memmove_extent_buffer(eb, o_dst, o_src,
1161 tm->move.nr_items * p_size);
1163 case MOD_LOG_ROOT_REPLACE:
1165 * this operation is special. for roots, this must be
1166 * handled explicitly before rewinding.
1167 * for non-roots, this operation may exist if the node
1168 * was a root: root A -> child B; then A gets empty and
1169 * B is promoted to the new root. in the mod log, we'll
1170 * have a root-replace operation for B, a tree block
1171 * that is no longer a root. we simply ignore that operation.
1175 next = rb_next(&tm->node);
1178 tm = container_of(next, struct tree_mod_elem, node);
1179 if (tm->index != first_tm->index)
1182 tree_mod_log_read_unlock(fs_info);
1183 btrfs_set_header_nritems(eb, n);
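/*
 * Illustrative sketch (not part of the original file): how each logged
 * op changes the rewound item count n in the loop above. An ADD is
 * undone by dropping the slot, the REMOVE variants by bringing it
 * back; the other ops rewrite or move slots in place.
 */
static inline int rewind_nritems_delta_sketch(enum mod_log_op op)
{
	switch (op) {
	case MOD_LOG_KEY_ADD:
		return -1;
	case MOD_LOG_KEY_REMOVE:
	case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
	case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		return 1;
	default:
		return 0;
	}
}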
1187 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1188 * is returned. If rewind operations happen, a fresh buffer is returned. The
1189 * returned buffer is always read-locked. If the returned buffer is not the
1190 * input buffer, the lock on the input buffer is released and the input buffer
1191 * is freed (its refcount is decremented).
1193 static struct extent_buffer *
1194 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1195 struct extent_buffer *eb, u64 time_seq)
1197 struct extent_buffer *eb_rewin;
1198 struct tree_mod_elem *tm;
1203 if (btrfs_header_level(eb) == 0)
1206 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1210 btrfs_set_path_blocking(path);
1211 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1213 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1214 BUG_ON(tm->slot != 0);
1215 eb_rewin = alloc_dummy_extent_buffer(eb->start,
1216 fs_info->tree_root->nodesize);
1218 btrfs_tree_read_unlock_blocking(eb);
1219 free_extent_buffer(eb);
1222 btrfs_set_header_bytenr(eb_rewin, eb->start);
1223 btrfs_set_header_backref_rev(eb_rewin,
1224 btrfs_header_backref_rev(eb));
1225 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1226 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1228 eb_rewin = btrfs_clone_extent_buffer(eb);
1230 btrfs_tree_read_unlock_blocking(eb);
1231 free_extent_buffer(eb);
1236 btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
1237 btrfs_tree_read_unlock_blocking(eb);
1238 free_extent_buffer(eb);
1240 extent_buffer_get(eb_rewin);
1241 btrfs_tree_read_lock(eb_rewin);
1242 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1243 WARN_ON(btrfs_header_nritems(eb_rewin) >
1244 BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
1250 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1251 * value. If there are no changes, the current root->root_node is returned. If
1252 * anything changed in between, there's a fresh buffer allocated on which the
1253 * rewind operations are done. In any case, the returned buffer is read locked.
1254 * Returns NULL on error (with no locks held).
1256 static inline struct extent_buffer *
1257 get_old_root(struct btrfs_root *root, u64 time_seq)
1259 struct tree_mod_elem *tm;
1260 struct extent_buffer *eb = NULL;
1261 struct extent_buffer *eb_root;
1262 struct extent_buffer *old;
1263 struct tree_mod_root *old_root = NULL;
1264 u64 old_generation = 0;
1268 eb_root = btrfs_read_lock_root_node(root);
1269 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1273 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1274 old_root = &tm->old_root;
1275 old_generation = tm->generation;
1276 logical = old_root->logical;
1278 logical = eb_root->start;
1281 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1282 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1283 btrfs_tree_read_unlock(eb_root);
1284 free_extent_buffer(eb_root);
1285 blocksize = btrfs_level_size(root, old_root->level);
1286 old = read_tree_block(root, logical, blocksize, 0);
1287 if (WARN_ON(!old || !extent_buffer_uptodate(old))) {
1288 free_extent_buffer(old);
1289 pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
1292 eb = btrfs_clone_extent_buffer(old);
1293 free_extent_buffer(old);
1295 } else if (old_root) {
1296 btrfs_tree_read_unlock(eb_root);
1297 free_extent_buffer(eb_root);
1298 eb = alloc_dummy_extent_buffer(logical, root->nodesize);
1300 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1301 eb = btrfs_clone_extent_buffer(eb_root);
1302 btrfs_tree_read_unlock_blocking(eb_root);
1303 free_extent_buffer(eb_root);
1308 extent_buffer_get(eb);
1309 btrfs_tree_read_lock(eb);
1311 btrfs_set_header_bytenr(eb, eb->start);
1312 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1313 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1314 btrfs_set_header_level(eb, old_root->level);
1315 btrfs_set_header_generation(eb, old_generation);
1318 __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
1320 WARN_ON(btrfs_header_level(eb) != 0);
1321 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1326 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1328 struct tree_mod_elem *tm;
1330 struct extent_buffer *eb_root = btrfs_root_node(root);
1332 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1333 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1334 level = tm->old_root.level;
1336 level = btrfs_header_level(eb_root);
1338 free_extent_buffer(eb_root);
1343 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1344 struct btrfs_root *root,
1345 struct extent_buffer *buf)
1347 /* ensure we can see the force_cow */
1351 * We do not need to cow a block if
1352 * 1) this block is not created or changed in this transaction;
1353 * 2) this block does not belong to TREE_RELOC tree;
1354 * 3) the root is not forced COW.
1356 * What is forced COW:
1357 * when we create a snapshot while committing the transaction,
1358 * after we've finished copying the src root, we must COW the shared
1359 * block to ensure metadata consistency.
1361 if (btrfs_header_generation(buf) == trans->transid &&
1362 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1363 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1364 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1371 * cows a single block, see __btrfs_cow_block for the real work.
1372 * This version of it has extra checks so that a block isn't cow'd more than
1373 * once per transaction, as long as it hasn't been written yet
1375 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1376 struct btrfs_root *root, struct extent_buffer *buf,
1377 struct extent_buffer *parent, int parent_slot,
1378 struct extent_buffer **cow_ret)
1383 if (trans->transaction != root->fs_info->running_transaction)
1384 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1386 root->fs_info->running_transaction->transid);
1388 if (trans->transid != root->fs_info->generation)
1389 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1390 trans->transid, root->fs_info->generation);
1392 if (!should_cow_block(trans, root, buf)) {
1397 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
1400 btrfs_set_lock_blocking(parent);
1401 btrfs_set_lock_blocking(buf);
1403 ret = __btrfs_cow_block(trans, root, buf, parent,
1404 parent_slot, cow_ret, search_start, 0);
1406 trace_btrfs_cow_block(root, buf, *cow_ret);
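/*
 * Illustrative sketch (not part of the original file): the search_start
 * hint above rounds the buffer's logical address down to a 1GiB
 * boundary, so repeated COWs tend to allocate from the same region.
 * E.g. a buffer at 5GiB + 123MiB yields a hint of exactly 5GiB.
 */
static inline u64 cow_search_start_sketch(u64 start)
{
	return start & ~((u64)(1024 * 1024 * 1024) - 1);
}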
1412 * helper function for defrag to decide if two blocks pointed to by a
1413 * node are actually close by
1415 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1417 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1419 if (blocknr > other && blocknr - (other + blocksize) < 32768)
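/*
 * Worked example (illustrative, hypothetical helper): with 16KiB
 * blocks, one at 1MiB and one at 1MiB + 40KiB leave a 24KiB gap,
 * which is under the 32KiB threshold above, so close_blocks()
 * treats them as close.
 */
static inline int close_blocks_example_sketch(void)
{
	return close_blocks(1048576, 1048576 + 40960, 16384);	/* 1 */
}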
1425 * compare two keys in a memcmp fashion
1427 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1429 struct btrfs_key k1;
1431 btrfs_disk_key_to_cpu(&k1, disk);
1433 return btrfs_comp_cpu_keys(&k1, k2);
1437 * same as comp_keys only with two btrfs_key's
1439 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1441 if (k1->objectid > k2->objectid)
1443 if (k1->objectid < k2->objectid)
1445 if (k1->type > k2->type)
1447 if (k1->type < k2->type)
1449 if (k1->offset > k2->offset)
1451 if (k1->offset < k2->offset)
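/*
 * Illustrative sketch (standalone, not part of the original file): the
 * same lexicographic order over plain values. E.g. with equal
 * objectids, the type field decides, so (256, 1, 0) sorts before
 * (256, 2, 0). The helper is hypothetical.
 */
static inline int cpu_key_cmp_sketch(u64 obj1, u8 type1, u64 off1,
				     u64 obj2, u8 type2, u64 off2)
{
	if (obj1 != obj2)
		return obj1 < obj2 ? -1 : 1;
	if (type1 != type2)
		return type1 < type2 ? -1 : 1;
	if (off1 != off2)
		return off1 < off2 ? -1 : 1;
	return 0;
}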
1457 * this is used by the defrag code to go through all the
1458 * leaves pointed to by a node and reallocate them so that
1459 * disk order is close to key order
1461 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1462 struct btrfs_root *root, struct extent_buffer *parent,
1463 int start_slot, u64 *last_ret,
1464 struct btrfs_key *progress)
1466 struct extent_buffer *cur;
1469 u64 search_start = *last_ret;
1479 int progress_passed = 0;
1480 struct btrfs_disk_key disk_key;
1482 parent_level = btrfs_header_level(parent);
1484 WARN_ON(trans->transaction != root->fs_info->running_transaction);
1485 WARN_ON(trans->transid != root->fs_info->generation);
1487 parent_nritems = btrfs_header_nritems(parent);
1488 blocksize = btrfs_level_size(root, parent_level - 1);
1489 end_slot = parent_nritems;
1491 if (parent_nritems == 1)
1494 btrfs_set_lock_blocking(parent);
1496 for (i = start_slot; i < end_slot; i++) {
1499 btrfs_node_key(parent, &disk_key, i);
1500 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1503 progress_passed = 1;
1504 blocknr = btrfs_node_blockptr(parent, i);
1505 gen = btrfs_node_ptr_generation(parent, i);
1506 if (last_block == 0)
1507 last_block = blocknr;
1510 other = btrfs_node_blockptr(parent, i - 1);
1511 close = close_blocks(blocknr, other, blocksize);
1513 if (!close && i < end_slot - 2) {
1514 other = btrfs_node_blockptr(parent, i + 1);
1515 close = close_blocks(blocknr, other, blocksize);
1518 last_block = blocknr;
1522 cur = btrfs_find_tree_block(root, blocknr, blocksize);
1524 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1527 if (!cur || !uptodate) {
1529 cur = read_tree_block(root, blocknr,
1531 if (!cur || !extent_buffer_uptodate(cur)) {
1532 free_extent_buffer(cur);
1535 } else if (!uptodate) {
1536 err = btrfs_read_buffer(cur, gen);
1538 free_extent_buffer(cur);
1543 if (search_start == 0)
1544 search_start = last_block;
1546 btrfs_tree_lock(cur);
1547 btrfs_set_lock_blocking(cur);
1548 err = __btrfs_cow_block(trans, root, cur, parent, i,
1551 (end_slot - i) * blocksize));
1553 btrfs_tree_unlock(cur);
1554 free_extent_buffer(cur);
1557 search_start = cur->start;
1558 last_block = cur->start;
1559 *last_ret = search_start;
1560 btrfs_tree_unlock(cur);
1561 free_extent_buffer(cur);
1567 * The leaf data grows from end-to-front in the node.
1568 * this returns the address of the start of the last item,
1569 * which is the stop of the leaf data stack
1571 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1572 struct extent_buffer *leaf)
1574 u32 nr = btrfs_header_nritems(leaf);
1576 return BTRFS_LEAF_DATA_SIZE(root);
1577 return btrfs_item_offset_nr(leaf, nr - 1);
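/*
 * Illustrative sketch (not part of the original file): with the item
 * array growing from the front and item data growing from the back,
 * the free space in a leaf is the gap between the two, i.e. the data
 * end minus the space used by the item headers. Hypothetical helper:
 */
static inline u32 leaf_free_space_sketch(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);

	return leaf_data_end(root, leaf) - nr * sizeof(struct btrfs_item);
}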
1582 * search for key in the extent_buffer. The items start at offset p,
1583 * and they are item_size apart. There are 'max' items in p.
1585 * the slot in the array is returned via slot, and it points to
1586 * the place where you would insert key if it is not found in
1589 * slot may point to max if the key is bigger than all of the keys
1591 static noinline int generic_bin_search(struct extent_buffer *eb,
1593 int item_size, struct btrfs_key *key,
1600 struct btrfs_disk_key *tmp = NULL;
1601 struct btrfs_disk_key unaligned;
1602 unsigned long offset;
1604 unsigned long map_start = 0;
1605 unsigned long map_len = 0;
1608 while (low < high) {
1609 mid = (low + high) / 2;
1610 offset = p + mid * item_size;
1612 if (!kaddr || offset < map_start ||
1613 (offset + sizeof(struct btrfs_disk_key)) >
1614 map_start + map_len) {
1616 err = map_private_extent_buffer(eb, offset,
1617 sizeof(struct btrfs_disk_key),
1618 &kaddr, &map_start, &map_len);
1621 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1624 read_extent_buffer(eb, &unaligned,
1625 offset, sizeof(unaligned));
1630 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1633 ret = comp_keys(tmp, key);
1649 * simple bin_search frontend that does the right thing for
1650 * leaves vs nodes
1652 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1653 int level, int *slot)
1656 return generic_bin_search(eb,
1657 offsetof(struct btrfs_leaf, items),
1658 sizeof(struct btrfs_item),
1659 key, btrfs_header_nritems(eb),
1662 return generic_bin_search(eb,
1663 offsetof(struct btrfs_node, ptrs),
1664 sizeof(struct btrfs_key_ptr),
1665 key, btrfs_header_nritems(eb),
1669 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1670 int level, int *slot)
1672 return bin_search(eb, key, level, slot);
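/*
 * Illustrative sketch (standalone, not part of the original file): the
 * contract of generic_bin_search() over a plain array. Returns 0 and
 * sets *slot on an exact match; otherwise returns 1 with *slot set to
 * the insertion point, which may equal max when the key is bigger than
 * all existing keys.
 */
static inline int bin_search_contract_sketch(const u64 *keys, int max,
					     u64 key, int *slot)
{
	int low = 0, high = max;

	while (low < high) {
		int mid = (low + high) / 2;

		if (keys[mid] < key)
			low = mid + 1;
		else if (keys[mid] > key)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}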
1675 static void root_add_used(struct btrfs_root *root, u32 size)
1677 spin_lock(&root->accounting_lock);
1678 btrfs_set_root_used(&root->root_item,
1679 btrfs_root_used(&root->root_item) + size);
1680 spin_unlock(&root->accounting_lock);
1683 static void root_sub_used(struct btrfs_root *root, u32 size)
1685 spin_lock(&root->accounting_lock);
1686 btrfs_set_root_used(&root->root_item,
1687 btrfs_root_used(&root->root_item) - size);
1688 spin_unlock(&root->accounting_lock);
1691 /* given a node and slot number, this reads the blocks it points to. The
1692 * extent buffer is returned with a reference taken (but unlocked).
1693 * NULL is returned on error.
1695 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1696 struct extent_buffer *parent, int slot)
1698 int level = btrfs_header_level(parent);
1699 struct extent_buffer *eb;
1703 if (slot >= btrfs_header_nritems(parent))
1708 eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
1709 btrfs_level_size(root, level - 1),
1710 btrfs_node_ptr_generation(parent, slot));
1711 if (eb && !extent_buffer_uptodate(eb)) {
1712 free_extent_buffer(eb);
1720 * node level balancing, used to make sure nodes are in proper order for
1721 * item deletion. We balance from the top down, so we have to make sure
1722 * that a deletion won't leave a node completely empty later on.
1724 static noinline int balance_level(struct btrfs_trans_handle *trans,
1725 struct btrfs_root *root,
1726 struct btrfs_path *path, int level)
1728 struct extent_buffer *right = NULL;
1729 struct extent_buffer *mid;
1730 struct extent_buffer *left = NULL;
1731 struct extent_buffer *parent = NULL;
1735 int orig_slot = path->slots[level];
1741 mid = path->nodes[level];
1743 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1744 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1745 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1747 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1749 if (level < BTRFS_MAX_LEVEL - 1) {
1750 parent = path->nodes[level + 1];
1751 pslot = path->slots[level + 1];
1755 * deal with the case where there is only one pointer in the root
1756 * by promoting the node below to a root
1759 struct extent_buffer *child;
1761 if (btrfs_header_nritems(mid) != 1)
1764 /* promote the child to a root */
1765 child = read_node_slot(root, mid, 0);
1768 btrfs_std_error(root->fs_info, ret);
1772 btrfs_tree_lock(child);
1773 btrfs_set_lock_blocking(child);
1774 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1776 btrfs_tree_unlock(child);
1777 free_extent_buffer(child);
1781 tree_mod_log_set_root_pointer(root, child, 1);
1782 rcu_assign_pointer(root->node, child);
1784 add_root_to_dirty_list(root);
1785 btrfs_tree_unlock(child);
1787 path->locks[level] = 0;
1788 path->nodes[level] = NULL;
1789 clean_tree_block(trans, root, mid);
1790 btrfs_tree_unlock(mid);
1791 /* once for the path */
1792 free_extent_buffer(mid);
1794 root_sub_used(root, mid->len);
1795 btrfs_free_tree_block(trans, root, mid, 0, 1);
1796 /* once for the root ptr */
1797 free_extent_buffer_stale(mid);
1800 if (btrfs_header_nritems(mid) >
1801 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1804 left = read_node_slot(root, parent, pslot - 1);
1806 btrfs_tree_lock(left);
1807 btrfs_set_lock_blocking(left);
1808 wret = btrfs_cow_block(trans, root, left,
1809 parent, pslot - 1, &left);
1815 right = read_node_slot(root, parent, pslot + 1);
1817 btrfs_tree_lock(right);
1818 btrfs_set_lock_blocking(right);
1819 wret = btrfs_cow_block(trans, root, right,
1820 parent, pslot + 1, &right);
1827 /* first, try to make some room in the middle buffer */
1829 orig_slot += btrfs_header_nritems(left);
1830 wret = push_node_left(trans, root, left, mid, 1);
1836 * then try to empty the right most buffer into the middle
1839 wret = push_node_left(trans, root, mid, right, 1);
1840 if (wret < 0 && wret != -ENOSPC)
1842 if (btrfs_header_nritems(right) == 0) {
1843 clean_tree_block(trans, root, right);
1844 btrfs_tree_unlock(right);
1845 del_ptr(root, path, level + 1, pslot + 1);
1846 root_sub_used(root, right->len);
1847 btrfs_free_tree_block(trans, root, right, 0, 1);
1848 free_extent_buffer_stale(right);
1851 struct btrfs_disk_key right_key;
1852 btrfs_node_key(right, &right_key, 0);
1853 tree_mod_log_set_node_key(root->fs_info, parent,
1855 btrfs_set_node_key(parent, &right_key, pslot + 1);
1856 btrfs_mark_buffer_dirty(parent);
1859 if (btrfs_header_nritems(mid) == 1) {
1861 * we're not allowed to leave a node with one item in the
1862 * tree during a delete. A deletion from lower in the tree
1863 * could try to delete the only pointer in this node.
1864 * So, pull some keys from the left.
1865 * There has to be a left pointer at this point because
1866 * otherwise we would have pulled some pointers from the
1871 btrfs_std_error(root->fs_info, ret);
1874 wret = balance_node_right(trans, root, mid, left);
1880 wret = push_node_left(trans, root, left, mid, 1);
1886 if (btrfs_header_nritems(mid) == 0) {
1887 clean_tree_block(trans, root, mid);
1888 btrfs_tree_unlock(mid);
1889 del_ptr(root, path, level + 1, pslot);
1890 root_sub_used(root, mid->len);
1891 btrfs_free_tree_block(trans, root, mid, 0, 1);
1892 free_extent_buffer_stale(mid);
1895 /* update the parent key to reflect our changes */
1896 struct btrfs_disk_key mid_key;
1897 btrfs_node_key(mid, &mid_key, 0);
1898 tree_mod_log_set_node_key(root->fs_info, parent,
1900 btrfs_set_node_key(parent, &mid_key, pslot);
1901 btrfs_mark_buffer_dirty(parent);
1904 /* update the path */
1906 if (btrfs_header_nritems(left) > orig_slot) {
1907 extent_buffer_get(left);
1908 /* left was locked after cow */
1909 path->nodes[level] = left;
1910 path->slots[level + 1] -= 1;
1911 path->slots[level] = orig_slot;
1913 btrfs_tree_unlock(mid);
1914 free_extent_buffer(mid);
1917 orig_slot -= btrfs_header_nritems(left);
1918 path->slots[level] = orig_slot;
1921 /* double check we haven't messed things up */
1923 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1927 btrfs_tree_unlock(right);
1928 free_extent_buffer(right);
1931 if (path->nodes[level] != left)
1932 btrfs_tree_unlock(left);
1933 free_extent_buffer(left);
1938 /* Node balancing for insertion. Here we only split or push nodes around
1939 * when they are completely full. This is also done top down, so we
1940 * have to be pessimistic.
1942 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1943 struct btrfs_root *root,
1944 struct btrfs_path *path, int level)
1946 struct extent_buffer *right = NULL;
1947 struct extent_buffer *mid;
1948 struct extent_buffer *left = NULL;
1949 struct extent_buffer *parent = NULL;
1953 int orig_slot = path->slots[level];
1958 mid = path->nodes[level];
1959 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1961 if (level < BTRFS_MAX_LEVEL - 1) {
1962 parent = path->nodes[level + 1];
1963 pslot = path->slots[level + 1];
1969 left = read_node_slot(root, parent, pslot - 1);
1971 /* first, try to make some room in the middle buffer */
1975 btrfs_tree_lock(left);
1976 btrfs_set_lock_blocking(left);
1978 left_nr = btrfs_header_nritems(left);
1979 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1982 ret = btrfs_cow_block(trans, root, left, parent,
1987 wret = push_node_left(trans, root,
1994 struct btrfs_disk_key disk_key;
1995 orig_slot += left_nr;
1996 btrfs_node_key(mid, &disk_key, 0);
1997 tree_mod_log_set_node_key(root->fs_info, parent,
1999 btrfs_set_node_key(parent, &disk_key, pslot);
2000 btrfs_mark_buffer_dirty(parent);
2001 if (btrfs_header_nritems(left) > orig_slot) {
2002 path->nodes[level] = left;
2003 path->slots[level + 1] -= 1;
2004 path->slots[level] = orig_slot;
2005 btrfs_tree_unlock(mid);
2006 free_extent_buffer(mid);
2009 btrfs_header_nritems(left);
2010 path->slots[level] = orig_slot;
2011 btrfs_tree_unlock(left);
2012 free_extent_buffer(left);
2016 btrfs_tree_unlock(left);
2017 free_extent_buffer(left);
2019 right = read_node_slot(root, parent, pslot + 1);
2022 * then try to empty the right most buffer into the middle
2027 btrfs_tree_lock(right);
2028 btrfs_set_lock_blocking(right);
2030 right_nr = btrfs_header_nritems(right);
2031 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2034 ret = btrfs_cow_block(trans, root, right,
2040 wret = balance_node_right(trans, root,
2047 struct btrfs_disk_key disk_key;
2049 btrfs_node_key(right, &disk_key, 0);
2050 tree_mod_log_set_node_key(root->fs_info, parent,
2052 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2053 btrfs_mark_buffer_dirty(parent);
2055 if (btrfs_header_nritems(mid) <= orig_slot) {
2056 path->nodes[level] = right;
2057 path->slots[level + 1] += 1;
2058 path->slots[level] = orig_slot -
2059 btrfs_header_nritems(mid);
2060 btrfs_tree_unlock(mid);
2061 free_extent_buffer(mid);
2063 btrfs_tree_unlock(right);
2064 free_extent_buffer(right);
2068 btrfs_tree_unlock(right);
2069 free_extent_buffer(right);
2075 * readahead one full node of leaves, finding things that are close
2076 * to the block in 'slot', and triggering ra on them.
2078 static void reada_for_search(struct btrfs_root *root,
2079 struct btrfs_path *path,
2080 int level, int slot, u64 objectid)
2082 struct extent_buffer *node;
2083 struct btrfs_disk_key disk_key;
2089 int direction = path->reada;
2090 struct extent_buffer *eb;
2098 if (!path->nodes[level])
2101 node = path->nodes[level];
2103 search = btrfs_node_blockptr(node, slot);
2104 blocksize = btrfs_level_size(root, level - 1);
2105 eb = btrfs_find_tree_block(root, search, blocksize);
2107 free_extent_buffer(eb);
2113 nritems = btrfs_header_nritems(node);
2117 if (direction < 0) {
2121 } else if (direction > 0) {
2126 if (path->reada < 0 && objectid) {
2127 btrfs_node_key(node, &disk_key, nr);
2128 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2131 search = btrfs_node_blockptr(node, nr);
2132 if ((search <= target && target - search <= 65536) ||
2133 (search > target && search - target <= 65536)) {
2134 gen = btrfs_node_ptr_generation(node, nr);
2135 readahead_tree_block(root, search, blocksize, gen);
2139 if ((nread > 65536 || nscan > 32))
2144 static noinline void reada_for_balance(struct btrfs_root *root,
2145 struct btrfs_path *path, int level)
2149 struct extent_buffer *parent;
2150 struct extent_buffer *eb;
2156 parent = path->nodes[level + 1];
2160 nritems = btrfs_header_nritems(parent);
2161 slot = path->slots[level + 1];
2162 blocksize = btrfs_level_size(root, level);
2165 block1 = btrfs_node_blockptr(parent, slot - 1);
2166 gen = btrfs_node_ptr_generation(parent, slot - 1);
2167 eb = btrfs_find_tree_block(root, block1, blocksize);
2169 * if we get -eagain from btrfs_buffer_uptodate, we
2170 * don't want to return eagain here. That will loop
2171 * forever.
2173 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2175 free_extent_buffer(eb);
2177 if (slot + 1 < nritems) {
2178 block2 = btrfs_node_blockptr(parent, slot + 1);
2179 gen = btrfs_node_ptr_generation(parent, slot + 1);
2180 eb = btrfs_find_tree_block(root, block2, blocksize);
2181 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2183 free_extent_buffer(eb);
2187 readahead_tree_block(root, block1, blocksize, 0);
2189 readahead_tree_block(root, block2, blocksize, 0);
2194 * when we walk down the tree, it is usually safe to unlock the higher layers
2195 * in the tree. The exceptions are when our path goes through slot 0, because
2196 * operations on the tree might require changing key pointers higher up in the
2197 * tree.
2199 * callers might also have set path->keep_locks, which tells this code to keep
2200 * the lock if the path points to the last slot in the block. This is part of
2201 * walking through the tree, and selecting the next slot in the higher block.
2203 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2204 * if lowest_unlock is 1, level 0 won't be unlocked
2206 static noinline void unlock_up(struct btrfs_path *path, int level,
2207 int lowest_unlock, int min_write_lock_level,
2208 int *write_lock_level)
2211 int skip_level = level;
2213 struct extent_buffer *t;
2215 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2216 if (!path->nodes[i])
2218 if (!path->locks[i])
2220 if (!no_skips && path->slots[i] == 0) {
2224 if (!no_skips && path->keep_locks) {
2227 nritems = btrfs_header_nritems(t);
2228 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2233 if (skip_level < i && i >= lowest_unlock)
2237 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2238 btrfs_tree_unlock_rw(t, path->locks[i]);
2240 if (write_lock_level &&
2241 i > min_write_lock_level &&
2242 i <= *write_lock_level) {
2243 *write_lock_level = i - 1;
2250 * This releases any locks held in the path starting at level and
2251 * going all the way up to the root.
2253 * btrfs_search_slot will keep the lock held on higher nodes in a few
2254 * corner cases, such as COW of the block at slot zero in the node. This
2255 * ignores those rules, and it should only be called when there are no
2256 * more updates to be done higher up in the tree.
2258 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2262 if (path->keep_locks)
2265 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2266 if (!path->nodes[i])
2268 if (!path->locks[i])
2270 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2276 * helper function for btrfs_search_slot. The goal is to find a block
2277 * in cache without setting the path to blocking. If we find the block
2278 * we return zero and the path is unchanged.
2280 * If we can't find the block, we set the path blocking and do some
2281 * reada. -EAGAIN is returned and the search must be repeated.
2284 read_block_for_search(struct btrfs_trans_handle *trans,
2285 struct btrfs_root *root, struct btrfs_path *p,
2286 struct extent_buffer **eb_ret, int level, int slot,
2287 struct btrfs_key *key, u64 time_seq)
2292 struct extent_buffer *b = *eb_ret;
2293 struct extent_buffer *tmp;
2296 blocknr = btrfs_node_blockptr(b, slot);
2297 gen = btrfs_node_ptr_generation(b, slot);
2298 blocksize = btrfs_level_size(root, level - 1);
2300 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
2302 /* first we do an atomic uptodate check */
2303 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2308 /* the pages were up to date, but we failed
2309 * the generation number check. Do a full
2310 * read for the generation number that is correct.
2311 * We must do this without dropping locks so
2312 * we can trust our generation number
2314 btrfs_set_path_blocking(p);
2316 /* now we're allowed to do a blocking uptodate check */
2317 ret = btrfs_read_buffer(tmp, gen);
2322 free_extent_buffer(tmp);
2323 btrfs_release_path(p);
2328 * reduce lock contention at high levels
2329 * of the btree by dropping locks before
2330 * we read. Don't release the lock on the current
2331 * level because we need to walk this node to figure
2332 * out which blocks to read.
2334 btrfs_unlock_up_safe(p, level + 1);
2335 btrfs_set_path_blocking(p);
2337 free_extent_buffer(tmp);
2339 reada_for_search(root, p, level, slot, key->objectid);
2341 btrfs_release_path(p);
2344 tmp = read_tree_block(root, blocknr, blocksize, 0);
2347 * If the read above didn't mark this buffer up to date,
2348 * it will never end up being up to date. Set ret to EIO now
2349 * and give up so that our caller doesn't loop forever
2352 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2354 free_extent_buffer(tmp);
2360 * helper function for btrfs_search_slot. This does all of the checks
2361 * for node-level blocks and does any balancing required based on
2362 * the ins_len.
2364 * If no extra work was required, zero is returned. If we had to
2365 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2366 * loop back and retry.
2369 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2370 struct btrfs_root *root, struct btrfs_path *p,
2371 struct extent_buffer *b, int level, int ins_len,
2372 int *write_lock_level)
2375 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2376 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2379 if (*write_lock_level < level + 1) {
2380 *write_lock_level = level + 1;
2381 btrfs_release_path(p);
2385 btrfs_set_path_blocking(p);
2386 reada_for_balance(root, p, level);
2387 sret = split_node(trans, root, p, level);
2388 btrfs_clear_path_blocking(p, NULL, 0);
2395 b = p->nodes[level];
2396 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2397 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2400 if (*write_lock_level < level + 1) {
2401 *write_lock_level = level + 1;
2402 btrfs_release_path(p);
2406 btrfs_set_path_blocking(p);
2407 reada_for_balance(root, p, level);
2408 sret = balance_level(trans, root, p, level);
2409 btrfs_clear_path_blocking(p, NULL, 0);
2415 b = p->nodes[level];
2417 btrfs_release_path(p);
2420 BUG_ON(btrfs_header_nritems(b) == 1);
2430 static void key_search_validate(struct extent_buffer *b,
2431 struct btrfs_key *key,
2434 #ifdef CONFIG_BTRFS_ASSERT
2435 struct btrfs_disk_key disk_key;
2437 btrfs_cpu_key_to_disk(&disk_key, key);
2440 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2441 offsetof(struct btrfs_leaf, items[0].key),
2444 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2445 offsetof(struct btrfs_node, ptrs[0].key),
2450 static int key_search(struct extent_buffer *b, struct btrfs_key *key,
2451 int level, int *prev_cmp, int *slot)
2453 if (*prev_cmp != 0) {
2454 *prev_cmp = bin_search(b, key, level, slot);
2458 key_search_validate(b, key, level);
2464 /* Proposed generic search function, meant to take the place of the
2465 * various small search helper functions throughout the code and standardize
2466 * the search interface. Right now, it only replaces the former __inode_info
2467 * in backref.c, and the former btrfs_find_root_ref in root-tree.c.
2469 * If a null key is passed, it returns immediately after running
2470 * btrfs_search_slot, leaving the path filled as it is and passing its
2471 * return value upward. If a real key is passed, it will set the caller's
2472 * path to point to the first item in the tree after its specified
2473 * objectid, type, and offset for which objectid and type match the input.
2475 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2476 u64 iobjectid, u64 ioff, u8 key_type,
2477 struct btrfs_key *found_key)
2480 struct btrfs_key key;
2481 struct extent_buffer *eb;
2483 key.type = key_type;
2484 key.objectid = iobjectid;
2487 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2488 if ((ret < 0) || (found_key == NULL))
2491 eb = path->nodes[0];
2492 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2493 ret = btrfs_next_leaf(fs_root, path);
2496 eb = path->nodes[0];
2499 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2500 if (found_key->type != key.type ||
2501 found_key->objectid != key.objectid)
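/*
 * Usage sketch (objectid is a placeholder): position 'path' at the
 * first item matching the given objectid and type at any offset, much
 * like the former btrfs_find_root_ref did:
 *
 *	struct btrfs_key found;
 *
 *	ret = btrfs_find_item(fs_root, path, objectid, 0,
 *			      BTRFS_ROOT_BACKREF_KEY, &found);
 *	if (ret == 0)
 *		... the item is at path->nodes[0], path->slots[0] ...
 */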
2508 * look for key in the tree. path is filled in with nodes along the way
2509 * if key is found, we return zero and you can find the item in the leaf
2510 * level of the path (level 0)
2512 * If the key isn't found, the path points to the slot where it should
2513 * be inserted, and 1 is returned. If there are other errors during the
2514 * search a negative error number is returned.
2516 * if ins_len > 0, nodes and leaves will be split as we walk down the
2517 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2520 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2521 *root, struct btrfs_key *key, struct btrfs_path *p, int
2524 struct extent_buffer *b;
2529 int lowest_unlock = 1;
2531 /* everything at write_lock_level or lower must be write locked */
2532 int write_lock_level = 0;
2533 u8 lowest_level = 0;
2534 int min_write_lock_level;
2537 lowest_level = p->lowest_level;
2538 WARN_ON(lowest_level && ins_len > 0);
2539 WARN_ON(p->nodes[0] != NULL);
2544 /* when we are removing items, we might have to go up to level
2545 * two as we update tree pointers. Make sure we keep write
2546 * locks for those levels as well
2548 write_lock_level = 2;
2549 } else if (ins_len > 0) {
2551 * for inserting items, make sure we have a write lock on
2552 * level 1 so we can update keys
2554 write_lock_level = 1;
2558 write_lock_level = -1;
2560 if (cow && (p->keep_locks || p->lowest_level))
2561 write_lock_level = BTRFS_MAX_LEVEL;
2563 min_write_lock_level = write_lock_level;
2568 * we try very hard to do read locks on the root
2570 root_lock = BTRFS_READ_LOCK;
2572 if (p->search_commit_root) {
2574 * the commit roots are read only
2575 * so we always do read locks
2577 b = root->commit_root;
2578 extent_buffer_get(b);
2579 level = btrfs_header_level(b);
2580 if (!p->skip_locking)
2581 btrfs_tree_read_lock(b);
2583 if (p->skip_locking) {
2584 b = btrfs_root_node(root);
2585 level = btrfs_header_level(b);
2587 /* we don't know the level of the root node
2588 * until we actually have it read locked
2590 b = btrfs_read_lock_root_node(root);
2591 level = btrfs_header_level(b);
2592 if (level <= write_lock_level) {
2593 /* whoops, must trade for write lock */
2594 btrfs_tree_read_unlock(b);
2595 free_extent_buffer(b);
2596 b = btrfs_lock_root_node(root);
2597 root_lock = BTRFS_WRITE_LOCK;
2599 /* the level might have changed, check again */
2600 level = btrfs_header_level(b);
2604 p->nodes[level] = b;
2605 if (!p->skip_locking)
2606 p->locks[level] = root_lock;
2609 level = btrfs_header_level(b);
2612 * setup the path here so we can release it under lock
2613 * contention with the cow code
2617 * if we don't really need to cow this block
2618 * then we don't want to set the path blocking,
2619 * so we test it here
2621 if (!should_cow_block(trans, root, b))
2624 btrfs_set_path_blocking(p);
2627 * must have write locks on this node and the parent
2630 if (level > write_lock_level ||
2631 (level + 1 > write_lock_level &&
2632 level + 1 < BTRFS_MAX_LEVEL &&
2633 p->nodes[level + 1])) {
2634 write_lock_level = level + 1;
2635 btrfs_release_path(p);
2639 err = btrfs_cow_block(trans, root, b,
2640 p->nodes[level + 1],
2641 p->slots[level + 1], &b);
2648 BUG_ON(!cow && ins_len);
2650 p->nodes[level] = b;
2651 btrfs_clear_path_blocking(p, NULL, 0);
2654 * we have a lock on b and as long as we aren't changing
2655 * the tree, there is no way for the items in b to change.
2656 * It is safe to drop the lock on our parent before we
2657 * go through the expensive btree search on b.
2659 * If cow is true, then we might be changing slot zero,
2660 * which may require changing the parent. So, we can't
2661 * drop the lock until after we know which slot we're operating on.
2665 btrfs_unlock_up_safe(p, level + 1);
2667 ret = key_search(b, key, level, &prev_cmp, &slot);
2671 if (ret && slot > 0) {
2675 p->slots[level] = slot;
2676 err = setup_nodes_for_search(trans, root, p, b, level,
2677 ins_len, &write_lock_level);
2684 b = p->nodes[level];
2685 slot = p->slots[level];
2688 * slot 0 is special, if we change the key
2689 * we have to update the parent pointer
2690 * which means we must have a write lock
2693 if (slot == 0 && cow &&
2694 write_lock_level < level + 1) {
2695 write_lock_level = level + 1;
2696 btrfs_release_path(p);
2700 unlock_up(p, level, lowest_unlock,
2701 min_write_lock_level, &write_lock_level);
2703 if (level == lowest_level) {
2709 err = read_block_for_search(trans, root, p,
2710 &b, level, slot, key, 0);
2718 if (!p->skip_locking) {
2719 level = btrfs_header_level(b);
2720 if (level <= write_lock_level) {
2721 err = btrfs_try_tree_write_lock(b);
2723 btrfs_set_path_blocking(p);
2725 btrfs_clear_path_blocking(p, b,
2728 p->locks[level] = BTRFS_WRITE_LOCK;
2730 err = btrfs_try_tree_read_lock(b);
2732 btrfs_set_path_blocking(p);
2733 btrfs_tree_read_lock(b);
2734 btrfs_clear_path_blocking(p, b,
2737 p->locks[level] = BTRFS_READ_LOCK;
2739 p->nodes[level] = b;
2742 p->slots[level] = slot;
2744 btrfs_leaf_free_space(root, b) < ins_len) {
2745 if (write_lock_level < 1) {
2746 write_lock_level = 1;
2747 btrfs_release_path(p);
2751 btrfs_set_path_blocking(p);
2752 err = split_leaf(trans, root, key,
2753 p, ins_len, ret == 0);
2754 btrfs_clear_path_blocking(p, NULL, 0);
2762 if (!p->search_for_split)
2763 unlock_up(p, level, lowest_unlock,
2764 min_write_lock_level, &write_lock_level);
2771 * we don't really know what they plan on doing with the path
2772 * from here on, so for now just mark it as blocking
2774 if (!p->leave_spinning)
2775 btrfs_set_path_blocking(p);
2777 btrfs_release_path(p);
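/*
 * Usage sketch for the contract documented above (error handling
 * trimmed, the inode key is only an example): a read-only exact
 * lookup passes a NULL trans handle, ins_len == 0 and cow == 0:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key;
 *
 *	key.objectid = ino;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		... item found at path->nodes[0], path->slots[0] ...
 *	else if (ret == 1)
 *		... not found, the slot is the insertion point ...
 *	btrfs_free_path(path);
 */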
2782 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2783 * current state of the tree together with the operations recorded in the tree
2784 * modification log to search for the key in a previous version of this tree, as
2785 * denoted by the time_seq parameter.
2787 * Naturally, there is no support for insert, delete or cow operations.
2789 * The resulting path and return value will be set up as if we called
2790 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2792 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2793 struct btrfs_path *p, u64 time_seq)
2795 struct extent_buffer *b;
2800 int lowest_unlock = 1;
2801 u8 lowest_level = 0;
2804 lowest_level = p->lowest_level;
2805 WARN_ON(p->nodes[0] != NULL);
2807 if (p->search_commit_root) {
2809 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2813 b = get_old_root(root, time_seq);
2814 level = btrfs_header_level(b);
2815 p->locks[level] = BTRFS_READ_LOCK;
2818 level = btrfs_header_level(b);
2819 p->nodes[level] = b;
2820 btrfs_clear_path_blocking(p, NULL, 0);
2823 * we have a lock on b and as long as we aren't changing
2824 * the tree, there is no way for the items in b to change.
2825 * It is safe to drop the lock on our parent before we
2826 * go through the expensive btree search on b.
2828 btrfs_unlock_up_safe(p, level + 1);
2831 * Since we can unwind eb's we want to do a real search every time.
2835 ret = key_search(b, key, level, &prev_cmp, &slot);
2839 if (ret && slot > 0) {
2843 p->slots[level] = slot;
2844 unlock_up(p, level, lowest_unlock, 0, NULL);
2846 if (level == lowest_level) {
2852 err = read_block_for_search(NULL, root, p, &b, level,
2853 slot, key, time_seq);
2861 level = btrfs_header_level(b);
2862 err = btrfs_try_tree_read_lock(b);
2864 btrfs_set_path_blocking(p);
2865 btrfs_tree_read_lock(b);
2866 btrfs_clear_path_blocking(p, b,
2869 b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
2874 p->locks[level] = BTRFS_READ_LOCK;
2875 p->nodes[level] = b;
2877 p->slots[level] = slot;
2878 unlock_up(p, level, lowest_unlock, 0, NULL);
2884 if (!p->leave_spinning)
2885 btrfs_set_path_blocking(p);
2887 btrfs_release_path(p);
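/*
 * Usage sketch (assumes the caller already holds a tree mod log
 * sequence number, e.g. from btrfs_get_tree_mod_seq): replay the
 * lookup against the tree as it was at 'time_seq':
 *
 *	ret = btrfs_search_old_slot(root, &key, path, time_seq);
 *
 * The path that comes back is read only; it must not be handed to any
 * of the insert or delete helpers.
 */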
2893 * helper to use instead of search slot if no exact match is needed but
2894 * instead the next or previous item should be returned.
2895 * When find_higher is true, the next higher item is returned, the next lower otherwise.
2897 * When return_any and find_higher are both true, and no higher item is found,
2898 * return the next lower instead.
2899 * When return_any is true and find_higher is false, and no lower item is found,
2900 * return the next higher instead.
2901 * It returns 0 if any item is found, 1 if none is found (tree empty), and < 0 on error.
2904 int btrfs_search_slot_for_read(struct btrfs_root *root,
2905 struct btrfs_key *key, struct btrfs_path *p,
2906 int find_higher, int return_any)
2909 struct extent_buffer *leaf;
2912 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2916 * a return value of 1 means the path is at the position where the
2917 * item should be inserted. Normally this is the next bigger item,
2918 * but in case the previous item is the last in a leaf, path points
2919 * to the first free slot in the previous leaf, i.e. at an invalid slot.
2925 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2926 ret = btrfs_next_leaf(root, p);
2932 * no higher item found, return the next lower instead
2937 btrfs_release_path(p);
2941 if (p->slots[0] == 0) {
2942 ret = btrfs_prev_leaf(root, p);
2946 p->slots[0] = btrfs_header_nritems(leaf) - 1;
2952 * no lower item found, return the next higher instead
2957 btrfs_release_path(p);
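/*
 * Usage sketch: find the first item at or after 'key', falling back
 * to the last lower item when nothing higher exists:
 *
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found,
 *				      path->slots[0]);
 */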
2967 * adjust the pointers going up the tree, starting at level
2968 * making sure the right key of each node points to 'key'.
2969 * This is used after shifting pointers to the left, so it stops
2970 * fixing up pointers when a given leaf/node is not in slot 0 of the higher levels
2974 static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
2975 struct btrfs_disk_key *key, int level)
2978 struct extent_buffer *t;
2980 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2981 int tslot = path->slots[i];
2982 if (!path->nodes[i])
2985 tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
2986 btrfs_set_node_key(t, key, tslot);
2987 btrfs_mark_buffer_dirty(path->nodes[i]);
2996 * This function isn't completely safe. It's the caller's responsibility
2997 * that the new key won't break the order
2999 void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
3000 struct btrfs_key *new_key)
3002 struct btrfs_disk_key disk_key;
3003 struct extent_buffer *eb;
3006 eb = path->nodes[0];
3007 slot = path->slots[0];
3009 btrfs_item_key(eb, &disk_key, slot - 1);
3010 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3012 if (slot < btrfs_header_nritems(eb) - 1) {
3013 btrfs_item_key(eb, &disk_key, slot + 1);
3014 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3017 btrfs_cpu_key_to_disk(&disk_key, new_key);
3018 btrfs_set_item_key(eb, &disk_key, slot);
3019 btrfs_mark_buffer_dirty(eb);
3021 fixup_low_keys(root, path, &disk_key, 1);
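/*
 * Usage sketch (bytes_trimmed is hypothetical): move a file extent
 * item's key forward after trimming bytes off its front; the new key
 * must still sort between the neighbours checked above:
 *
 *	struct btrfs_key new_key = key;
 *
 *	new_key.offset += bytes_trimmed;
 *	btrfs_set_item_key_safe(root, path, &new_key);
 */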
3025 * try to push data from one node into the next node left in the tree.
3028 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3029 * error, and > 0 if there was no room in the left hand block.
3031 static int push_node_left(struct btrfs_trans_handle *trans,
3032 struct btrfs_root *root, struct extent_buffer *dst,
3033 struct extent_buffer *src, int empty)
3040 src_nritems = btrfs_header_nritems(src);
3041 dst_nritems = btrfs_header_nritems(dst);
3042 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3043 WARN_ON(btrfs_header_generation(src) != trans->transid);
3044 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3046 if (!empty && src_nritems <= 8)
3049 if (push_items <= 0)
3053 push_items = min(src_nritems, push_items);
3054 if (push_items < src_nritems) {
3055 /* leave at least 8 pointers in the node if
3056 * we aren't going to empty it
3058 if (src_nritems - push_items < 8) {
3059 if (push_items <= 8)
3065 push_items = min(src_nritems - 8, push_items);
3067 tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3069 copy_extent_buffer(dst, src,
3070 btrfs_node_key_ptr_offset(dst_nritems),
3071 btrfs_node_key_ptr_offset(0),
3072 push_items * sizeof(struct btrfs_key_ptr));
3074 if (push_items < src_nritems) {
3076 * don't call tree_mod_log_eb_move here, key removal was already
3077 * fully logged by tree_mod_log_eb_copy above.
3079 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3080 btrfs_node_key_ptr_offset(push_items),
3081 (src_nritems - push_items) *
3082 sizeof(struct btrfs_key_ptr));
3084 btrfs_set_header_nritems(src, src_nritems - push_items);
3085 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3086 btrfs_mark_buffer_dirty(src);
3087 btrfs_mark_buffer_dirty(dst);
3093 * try to push data from one node into the next node right in the tree.
3096 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3097 * error, and > 0 if there was no room in the right hand block.
3099 * this will only push up to 1/2 the contents of the left node over
3101 static int balance_node_right(struct btrfs_trans_handle *trans,
3102 struct btrfs_root *root,
3103 struct extent_buffer *dst,
3104 struct extent_buffer *src)
3112 WARN_ON(btrfs_header_generation(src) != trans->transid);
3113 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3115 src_nritems = btrfs_header_nritems(src);
3116 dst_nritems = btrfs_header_nritems(dst);
3117 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3118 if (push_items <= 0)
3121 if (src_nritems < 4)
3124 max_push = src_nritems / 2 + 1;
3125 /* don't try to empty the node */
3126 if (max_push >= src_nritems)
3129 if (max_push < push_items)
3130 push_items = max_push;
3132 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3133 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3134 btrfs_node_key_ptr_offset(0),
3136 sizeof(struct btrfs_key_ptr));
3138 tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3139 src_nritems - push_items, push_items);
3140 copy_extent_buffer(dst, src,
3141 btrfs_node_key_ptr_offset(0),
3142 btrfs_node_key_ptr_offset(src_nritems - push_items),
3143 push_items * sizeof(struct btrfs_key_ptr));
3145 btrfs_set_header_nritems(src, src_nritems - push_items);
3146 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3148 btrfs_mark_buffer_dirty(src);
3149 btrfs_mark_buffer_dirty(dst);
3155 * helper function to insert a new root level in the tree.
3156 * A new node is allocated, and a single item is inserted to
3157 * point to the existing root
3159 * returns zero on success or < 0 on failure.
3161 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3162 struct btrfs_root *root,
3163 struct btrfs_path *path, int level)
3166 struct extent_buffer *lower;
3167 struct extent_buffer *c;
3168 struct extent_buffer *old;
3169 struct btrfs_disk_key lower_key;
3171 BUG_ON(path->nodes[level]);
3172 BUG_ON(path->nodes[level-1] != root->node);
3174 lower = path->nodes[level-1];
3176 btrfs_item_key(lower, &lower_key, 0);
3178 btrfs_node_key(lower, &lower_key, 0);
3180 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3181 root->root_key.objectid, &lower_key,
3182 level, root->node->start, 0);
3186 root_add_used(root, root->nodesize);
3188 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3189 btrfs_set_header_nritems(c, 1);
3190 btrfs_set_header_level(c, level);
3191 btrfs_set_header_bytenr(c, c->start);
3192 btrfs_set_header_generation(c, trans->transid);
3193 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3194 btrfs_set_header_owner(c, root->root_key.objectid);
3196 write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
3199 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3200 btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
3202 btrfs_set_node_key(c, &lower_key, 0);
3203 btrfs_set_node_blockptr(c, 0, lower->start);
3204 lower_gen = btrfs_header_generation(lower);
3205 WARN_ON(lower_gen != trans->transid);
3207 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3209 btrfs_mark_buffer_dirty(c);
3212 tree_mod_log_set_root_pointer(root, c, 0);
3213 rcu_assign_pointer(root->node, c);
3215 /* the super has an extra ref to root->node */
3216 free_extent_buffer(old);
3218 add_root_to_dirty_list(root);
3219 extent_buffer_get(c);
3220 path->nodes[level] = c;
3221 path->locks[level] = BTRFS_WRITE_LOCK;
3222 path->slots[level] = 0;
3227 * worker function to insert a single pointer in a node.
3228 * the node should have enough room for the pointer already
3230 * slot and level indicate where you want the key to go, and
3231 * blocknr is the block the key points to.
3233 static void insert_ptr(struct btrfs_trans_handle *trans,
3234 struct btrfs_root *root, struct btrfs_path *path,
3235 struct btrfs_disk_key *key, u64 bytenr,
3236 int slot, int level)
3238 struct extent_buffer *lower;
3242 BUG_ON(!path->nodes[level]);
3243 btrfs_assert_tree_locked(path->nodes[level]);
3244 lower = path->nodes[level];
3245 nritems = btrfs_header_nritems(lower);
3246 BUG_ON(slot > nritems);
3247 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3248 if (slot != nritems) {
3250 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3251 slot, nritems - slot);
3252 memmove_extent_buffer(lower,
3253 btrfs_node_key_ptr_offset(slot + 1),
3254 btrfs_node_key_ptr_offset(slot),
3255 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3258 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3259 MOD_LOG_KEY_ADD, GFP_NOFS);
3262 btrfs_set_node_key(lower, key, slot);
3263 btrfs_set_node_blockptr(lower, slot, bytenr);
3264 WARN_ON(trans->transid == 0);
3265 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3266 btrfs_set_header_nritems(lower, nritems + 1);
3267 btrfs_mark_buffer_dirty(lower);
3271 * split the node at the specified level in path in two.
3272 * The path is corrected to point to the appropriate node after the split.
3274 * Before splitting this tries to make some room in the node by pushing
3275 * left and right, if either one works, it returns right away.
3277 * returns 0 on success and < 0 on failure
3279 static noinline int split_node(struct btrfs_trans_handle *trans,
3280 struct btrfs_root *root,
3281 struct btrfs_path *path, int level)
3283 struct extent_buffer *c;
3284 struct extent_buffer *split;
3285 struct btrfs_disk_key disk_key;
3290 c = path->nodes[level];
3291 WARN_ON(btrfs_header_generation(c) != trans->transid);
3292 if (c == root->node) {
3294 * trying to split the root, let's make a new one
3296 * tree mod log: We don't log removal of the old root in
3297 * insert_new_root, because that root buffer will be kept as a
3298 * normal node. We are going to log removal of half of the
3299 * elements below with tree_mod_log_eb_copy. We're holding a
3300 * tree lock on the buffer, which is why we cannot race with
3301 * other tree_mod_log users.
3303 ret = insert_new_root(trans, root, path, level + 1);
3307 ret = push_nodes_for_insert(trans, root, path, level);
3308 c = path->nodes[level];
3309 if (!ret && btrfs_header_nritems(c) <
3310 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3316 c_nritems = btrfs_header_nritems(c);
3317 mid = (c_nritems + 1) / 2;
3318 btrfs_node_key(c, &disk_key, mid);
3320 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3321 root->root_key.objectid,
3322 &disk_key, level, c->start, 0);
3324 return PTR_ERR(split);
3326 root_add_used(root, root->nodesize);
3328 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3329 btrfs_set_header_level(split, btrfs_header_level(c));
3330 btrfs_set_header_bytenr(split, split->start);
3331 btrfs_set_header_generation(split, trans->transid);
3332 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3333 btrfs_set_header_owner(split, root->root_key.objectid);
3334 write_extent_buffer(split, root->fs_info->fsid,
3335 btrfs_header_fsid(), BTRFS_FSID_SIZE);
3336 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3337 btrfs_header_chunk_tree_uuid(split),
3340 tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
3341 copy_extent_buffer(split, c,
3342 btrfs_node_key_ptr_offset(0),
3343 btrfs_node_key_ptr_offset(mid),
3344 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3345 btrfs_set_header_nritems(split, c_nritems - mid);
3346 btrfs_set_header_nritems(c, mid);
3349 btrfs_mark_buffer_dirty(c);
3350 btrfs_mark_buffer_dirty(split);
3352 insert_ptr(trans, root, path, &disk_key, split->start,
3353 path->slots[level + 1] + 1, level + 1);
3355 if (path->slots[level] >= mid) {
3356 path->slots[level] -= mid;
3357 btrfs_tree_unlock(c);
3358 free_extent_buffer(c);
3359 path->nodes[level] = split;
3360 path->slots[level + 1] += 1;
3362 btrfs_tree_unlock(split);
3363 free_extent_buffer(split);
3369 * how many bytes are required to store the items in a leaf. start
3370 * and nr indicate which items in the leaf to check. This totals up the
3371 * space used both by the item structs and the item data
3373 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3375 struct btrfs_item *start_item;
3376 struct btrfs_item *end_item;
3377 struct btrfs_map_token token;
3379 int nritems = btrfs_header_nritems(l);
3380 int end = min(nritems, start + nr) - 1;
3384 btrfs_init_map_token(&token);
3385 start_item = btrfs_item_nr(start);
3386 end_item = btrfs_item_nr(end);
3387 data_len = btrfs_token_item_offset(l, start_item, &token) +
3388 btrfs_token_item_size(l, start_item, &token);
3389 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3390 data_len += sizeof(struct btrfs_item) * nr;
3391 WARN_ON(data_len < 0);
3396 * The space between the end of the leaf items and
3397 * the start of the leaf data. IOW, how much room
3398 * the leaf has left for both items and data
3400 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3401 struct extent_buffer *leaf)
3403 int nritems = btrfs_header_nritems(leaf);
3405 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3407 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
3408 "used %d nritems %d\n",
3409 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3410 leaf_space_used(leaf, 0, nritems), nritems);
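/*
 * Worked example of the accounting above (illustrative numbers): on a
 * 4k leaf, BTRFS_LEAF_DATA_SIZE(root) is 3995 (4096 minus the 101
 * byte header). If leaf_space_used() reports 3000 bytes for the
 * existing items plus their data, btrfs_leaf_free_space() returns
 * 995, so an insert carrying 100 bytes of data costs
 * 100 + sizeof(struct btrfs_item) = 125 bytes and still fits.
 */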
3416 * min slot controls the lowest index we're willing to push to the
3417 * right. We'll push up to and including min_slot, but no lower
3419 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3420 struct btrfs_root *root,
3421 struct btrfs_path *path,
3422 int data_size, int empty,
3423 struct extent_buffer *right,
3424 int free_space, u32 left_nritems,
3427 struct extent_buffer *left = path->nodes[0];
3428 struct extent_buffer *upper = path->nodes[1];
3429 struct btrfs_map_token token;
3430 struct btrfs_disk_key disk_key;
3435 struct btrfs_item *item;
3441 btrfs_init_map_token(&token);
3446 nr = max_t(u32, 1, min_slot);
3448 if (path->slots[0] >= left_nritems)
3449 push_space += data_size;
3451 slot = path->slots[1];
3452 i = left_nritems - 1;
3454 item = btrfs_item_nr(i);
3456 if (!empty && push_items > 0) {
3457 if (path->slots[0] > i)
3459 if (path->slots[0] == i) {
3460 int space = btrfs_leaf_free_space(root, left);
3461 if (space + push_space * 2 > free_space)
3466 if (path->slots[0] == i)
3467 push_space += data_size;
3469 this_item_size = btrfs_item_size(left, item);
3470 if (this_item_size + sizeof(*item) + push_space > free_space)
3474 push_space += this_item_size + sizeof(*item);
3480 if (push_items == 0)
3483 WARN_ON(!empty && push_items == left_nritems);
3485 /* push left to right */
3486 right_nritems = btrfs_header_nritems(right);
3488 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3489 push_space -= leaf_data_end(root, left);
3491 /* make room in the right data area */
3492 data_end = leaf_data_end(root, right);
3493 memmove_extent_buffer(right,
3494 btrfs_leaf_data(right) + data_end - push_space,
3495 btrfs_leaf_data(right) + data_end,
3496 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3498 /* copy from the left data area */
3499 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3500 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3501 btrfs_leaf_data(left) + leaf_data_end(root, left),
3504 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3505 btrfs_item_nr_offset(0),
3506 right_nritems * sizeof(struct btrfs_item));
3508 /* copy the items from left to right */
3509 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3510 btrfs_item_nr_offset(left_nritems - push_items),
3511 push_items * sizeof(struct btrfs_item));
3513 /* update the item pointers */
3514 right_nritems += push_items;
3515 btrfs_set_header_nritems(right, right_nritems);
3516 push_space = BTRFS_LEAF_DATA_SIZE(root);
3517 for (i = 0; i < right_nritems; i++) {
3518 item = btrfs_item_nr(i);
3519 push_space -= btrfs_token_item_size(right, item, &token);
3520 btrfs_set_token_item_offset(right, item, push_space, &token);
3523 left_nritems -= push_items;
3524 btrfs_set_header_nritems(left, left_nritems);
3527 btrfs_mark_buffer_dirty(left);
3529 clean_tree_block(trans, root, left);
3531 btrfs_mark_buffer_dirty(right);
3533 btrfs_item_key(right, &disk_key, 0);
3534 btrfs_set_node_key(upper, &disk_key, slot + 1);
3535 btrfs_mark_buffer_dirty(upper);
3537 /* then fixup the leaf pointer in the path */
3538 if (path->slots[0] >= left_nritems) {
3539 path->slots[0] -= left_nritems;
3540 if (btrfs_header_nritems(path->nodes[0]) == 0)
3541 clean_tree_block(trans, root, path->nodes[0]);
3542 btrfs_tree_unlock(path->nodes[0]);
3543 free_extent_buffer(path->nodes[0]);
3544 path->nodes[0] = right;
3545 path->slots[1] += 1;
3547 btrfs_tree_unlock(right);
3548 free_extent_buffer(right);
3553 btrfs_tree_unlock(right);
3554 free_extent_buffer(right);
3559 * push some data in the path leaf to the right, trying to free up at
3560 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3562 * returns 1 if the push failed because the other node didn't have enough
3563 * room, 0 if everything worked out and < 0 if there were major errors.
3565 * this will push starting from min_slot to the end of the leaf. It won't
3566 * push any slot lower than min_slot
3568 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3569 *root, struct btrfs_path *path,
3570 int min_data_size, int data_size,
3571 int empty, u32 min_slot)
3573 struct extent_buffer *left = path->nodes[0];
3574 struct extent_buffer *right;
3575 struct extent_buffer *upper;
3581 if (!path->nodes[1])
3584 slot = path->slots[1];
3585 upper = path->nodes[1];
3586 if (slot >= btrfs_header_nritems(upper) - 1)
3589 btrfs_assert_tree_locked(path->nodes[1]);
3591 right = read_node_slot(root, upper, slot + 1);
3595 btrfs_tree_lock(right);
3596 btrfs_set_lock_blocking(right);
3598 free_space = btrfs_leaf_free_space(root, right);
3599 if (free_space < data_size)
3602 /* cow and double check */
3603 ret = btrfs_cow_block(trans, root, right, upper,
3608 free_space = btrfs_leaf_free_space(root, right);
3609 if (free_space < data_size)
3612 left_nritems = btrfs_header_nritems(left);
3613 if (left_nritems == 0)
3616 return __push_leaf_right(trans, root, path, min_data_size, empty,
3617 right, free_space, left_nritems, min_slot);
3619 btrfs_tree_unlock(right);
3620 free_extent_buffer(right);
3625 * push some data in the path leaf to the left, trying to free up at
3626 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3628 * max_slot can put a limit on how far into the leaf we'll push items. The
3629 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the items
3632 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3633 struct btrfs_root *root,
3634 struct btrfs_path *path, int data_size,
3635 int empty, struct extent_buffer *left,
3636 int free_space, u32 right_nritems,
3639 struct btrfs_disk_key disk_key;
3640 struct extent_buffer *right = path->nodes[0];
3644 struct btrfs_item *item;
3645 u32 old_left_nritems;
3649 u32 old_left_item_size;
3650 struct btrfs_map_token token;
3652 btrfs_init_map_token(&token);
3655 nr = min(right_nritems, max_slot);
3657 nr = min(right_nritems - 1, max_slot);
3659 for (i = 0; i < nr; i++) {
3660 item = btrfs_item_nr(i);
3662 if (!empty && push_items > 0) {
3663 if (path->slots[0] < i)
3665 if (path->slots[0] == i) {
3666 int space = btrfs_leaf_free_space(root, right);
3667 if (space + push_space * 2 > free_space)
3672 if (path->slots[0] == i)
3673 push_space += data_size;
3675 this_item_size = btrfs_item_size(right, item);
3676 if (this_item_size + sizeof(*item) + push_space > free_space)
3680 push_space += this_item_size + sizeof(*item);
3683 if (push_items == 0) {
3687 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3689 /* push data from right to left */
3690 copy_extent_buffer(left, right,
3691 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3692 btrfs_item_nr_offset(0),
3693 push_items * sizeof(struct btrfs_item));
3695 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3696 btrfs_item_offset_nr(right, push_items - 1);
3698 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3699 leaf_data_end(root, left) - push_space,
3700 btrfs_leaf_data(right) +
3701 btrfs_item_offset_nr(right, push_items - 1),
3703 old_left_nritems = btrfs_header_nritems(left);
3704 BUG_ON(old_left_nritems <= 0);
3706 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3707 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3710 item = btrfs_item_nr(i);
3712 ioff = btrfs_token_item_offset(left, item, &token);
3713 btrfs_set_token_item_offset(left, item,
3714 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3717 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3719 /* fixup right node */
3720 if (push_items > right_nritems)
3721 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3724 if (push_items < right_nritems) {
3725 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3726 leaf_data_end(root, right);
3727 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3728 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3729 btrfs_leaf_data(right) +
3730 leaf_data_end(root, right), push_space);
3732 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3733 btrfs_item_nr_offset(push_items),
3734 (btrfs_header_nritems(right) - push_items) *
3735 sizeof(struct btrfs_item));
3737 right_nritems -= push_items;
3738 btrfs_set_header_nritems(right, right_nritems);
3739 push_space = BTRFS_LEAF_DATA_SIZE(root);
3740 for (i = 0; i < right_nritems; i++) {
3741 item = btrfs_item_nr(i);
3743 push_space = push_space - btrfs_token_item_size(right,
3745 btrfs_set_token_item_offset(right, item, push_space, &token);
3748 btrfs_mark_buffer_dirty(left);
3750 btrfs_mark_buffer_dirty(right);
3752 clean_tree_block(trans, root, right);
3754 btrfs_item_key(right, &disk_key, 0);
3755 fixup_low_keys(root, path, &disk_key, 1);
3757 /* then fixup the leaf pointer in the path */
3758 if (path->slots[0] < push_items) {
3759 path->slots[0] += old_left_nritems;
3760 btrfs_tree_unlock(path->nodes[0]);
3761 free_extent_buffer(path->nodes[0]);
3762 path->nodes[0] = left;
3763 path->slots[1] -= 1;
3765 btrfs_tree_unlock(left);
3766 free_extent_buffer(left);
3767 path->slots[0] -= push_items;
3769 BUG_ON(path->slots[0] < 0);
3772 btrfs_tree_unlock(left);
3773 free_extent_buffer(left);
3778 * push some data in the path leaf to the left, trying to free up at
3779 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3781 * max_slot can put a limit on how far into the leaf we'll push items. The
3782 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the items
3785 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3786 *root, struct btrfs_path *path, int min_data_size,
3787 int data_size, int empty, u32 max_slot)
3789 struct extent_buffer *right = path->nodes[0];
3790 struct extent_buffer *left;
3796 slot = path->slots[1];
3799 if (!path->nodes[1])
3802 right_nritems = btrfs_header_nritems(right);
3803 if (right_nritems == 0)
3806 btrfs_assert_tree_locked(path->nodes[1]);
3808 left = read_node_slot(root, path->nodes[1], slot - 1);
3812 btrfs_tree_lock(left);
3813 btrfs_set_lock_blocking(left);
3815 free_space = btrfs_leaf_free_space(root, left);
3816 if (free_space < data_size) {
3821 /* cow and double check */
3822 ret = btrfs_cow_block(trans, root, left,
3823 path->nodes[1], slot - 1, &left);
3825 /* we hit -ENOSPC, but it isn't fatal here */
3831 free_space = btrfs_leaf_free_space(root, left);
3832 if (free_space < data_size) {
3837 return __push_leaf_left(trans, root, path, min_data_size,
3838 empty, left, free_space, right_nritems,
3841 btrfs_tree_unlock(left);
3842 free_extent_buffer(left);
3847 * split the path's leaf in two, making sure there is at least data_size
3848 * available for the resulting leaf level of the path.
3850 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3851 struct btrfs_root *root,
3852 struct btrfs_path *path,
3853 struct extent_buffer *l,
3854 struct extent_buffer *right,
3855 int slot, int mid, int nritems)
3860 struct btrfs_disk_key disk_key;
3861 struct btrfs_map_token token;
3863 btrfs_init_map_token(&token);
3865 nritems = nritems - mid;
3866 btrfs_set_header_nritems(right, nritems);
3867 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
3869 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3870 btrfs_item_nr_offset(mid),
3871 nritems * sizeof(struct btrfs_item));
3873 copy_extent_buffer(right, l,
3874 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
3875 data_copy_size, btrfs_leaf_data(l) +
3876 leaf_data_end(root, l), data_copy_size);
3878 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
3879 btrfs_item_end_nr(l, mid);
3881 for (i = 0; i < nritems; i++) {
3882 struct btrfs_item *item = btrfs_item_nr(i);
3885 ioff = btrfs_token_item_offset(right, item, &token);
3886 btrfs_set_token_item_offset(right, item,
3887 ioff + rt_data_off, &token);
3890 btrfs_set_header_nritems(l, mid);
3891 btrfs_item_key(right, &disk_key, 0);
3892 insert_ptr(trans, root, path, &disk_key, right->start,
3893 path->slots[1] + 1, 1);
3895 btrfs_mark_buffer_dirty(right);
3896 btrfs_mark_buffer_dirty(l);
3897 BUG_ON(path->slots[0] != slot);
3900 btrfs_tree_unlock(path->nodes[0]);
3901 free_extent_buffer(path->nodes[0]);
3902 path->nodes[0] = right;
3903 path->slots[0] -= mid;
3904 path->slots[1] += 1;
3906 btrfs_tree_unlock(right);
3907 free_extent_buffer(right);
3910 BUG_ON(path->slots[0] < 0);
3914 * double splits happen when we need to insert a big item in the middle
3915 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3916 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3919 * We avoid this by trying to push the items on either side of our target
3920 * into the adjacent leaves. If all goes well we can avoid the double split
3923 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3924 struct btrfs_root *root,
3925 struct btrfs_path *path,
3933 slot = path->slots[0];
3936 * try to push all the items after our slot into the next leaf
3939 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
3946 nritems = btrfs_header_nritems(path->nodes[0]);
3948 * our goal is to get our slot at the start or end of a leaf. If
3949 * we've done so we're done
3951 if (path->slots[0] == 0 || path->slots[0] == nritems)
3954 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3957 /* try to push all the items before our slot into the next leaf */
3958 slot = path->slots[0];
3959 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
3972 * split the path's leaf in two, making sure there is at least data_size
3973 * available for the resulting leaf level of the path.
3975 * returns 0 if all went well and < 0 on failure.
3977 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3978 struct btrfs_root *root,
3979 struct btrfs_key *ins_key,
3980 struct btrfs_path *path, int data_size,
3983 struct btrfs_disk_key disk_key;
3984 struct extent_buffer *l;
3988 struct extent_buffer *right;
3992 int num_doubles = 0;
3993 int tried_avoid_double = 0;
3996 slot = path->slots[0];
3997 if (extend && data_size + btrfs_item_size_nr(l, slot) +
3998 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
4001 /* first try to make some room by pushing left and right */
4002 if (data_size && path->nodes[1]) {
4003 wret = push_leaf_right(trans, root, path, data_size,
4008 wret = push_leaf_left(trans, root, path, data_size,
4009 data_size, 0, (u32)-1);
4015 /* did the pushes work? */
4016 if (btrfs_leaf_free_space(root, l) >= data_size)
4020 if (!path->nodes[1]) {
4021 ret = insert_new_root(trans, root, path, 1);
4028 slot = path->slots[0];
4029 nritems = btrfs_header_nritems(l);
4030 mid = (nritems + 1) / 2;
4034 leaf_space_used(l, mid, nritems - mid) + data_size >
4035 BTRFS_LEAF_DATA_SIZE(root)) {
4036 if (slot >= nritems) {
4040 if (mid != nritems &&
4041 leaf_space_used(l, mid, nritems - mid) +
4042 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4043 if (data_size && !tried_avoid_double)
4044 goto push_for_double;
4050 if (leaf_space_used(l, 0, mid) + data_size >
4051 BTRFS_LEAF_DATA_SIZE(root)) {
4052 if (!extend && data_size && slot == 0) {
4054 } else if ((extend || !data_size) && slot == 0) {
4058 if (mid != nritems &&
4059 leaf_space_used(l, mid, nritems - mid) +
4060 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4061 if (data_size && !tried_avoid_double)
4062 goto push_for_double;
4070 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4072 btrfs_item_key(l, &disk_key, mid);
4074 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
4075 root->root_key.objectid,
4076 &disk_key, 0, l->start, 0);
4078 return PTR_ERR(right);
4080 root_add_used(root, root->leafsize);
4082 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4083 btrfs_set_header_bytenr(right, right->start);
4084 btrfs_set_header_generation(right, trans->transid);
4085 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4086 btrfs_set_header_owner(right, root->root_key.objectid);
4087 btrfs_set_header_level(right, 0);
4088 write_extent_buffer(right, root->fs_info->fsid,
4089 btrfs_header_fsid(), BTRFS_FSID_SIZE);
4091 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
4092 btrfs_header_chunk_tree_uuid(right),
4097 btrfs_set_header_nritems(right, 0);
4098 insert_ptr(trans, root, path, &disk_key, right->start,
4099 path->slots[1] + 1, 1);
4100 btrfs_tree_unlock(path->nodes[0]);
4101 free_extent_buffer(path->nodes[0]);
4102 path->nodes[0] = right;
4104 path->slots[1] += 1;
4106 btrfs_set_header_nritems(right, 0);
4107 insert_ptr(trans, root, path, &disk_key, right->start,
4109 btrfs_tree_unlock(path->nodes[0]);
4110 free_extent_buffer(path->nodes[0]);
4111 path->nodes[0] = right;
4113 if (path->slots[1] == 0)
4114 fixup_low_keys(root, path, &disk_key, 1);
4116 btrfs_mark_buffer_dirty(right);
4120 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4123 BUG_ON(num_doubles != 0);
4131 push_for_double_split(trans, root, path, data_size);
4132 tried_avoid_double = 1;
4133 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4138 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4139 struct btrfs_root *root,
4140 struct btrfs_path *path, int ins_len)
4142 struct btrfs_key key;
4143 struct extent_buffer *leaf;
4144 struct btrfs_file_extent_item *fi;
4149 leaf = path->nodes[0];
4150 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4152 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4153 key.type != BTRFS_EXTENT_CSUM_KEY);
4155 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4158 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4159 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4160 fi = btrfs_item_ptr(leaf, path->slots[0],
4161 struct btrfs_file_extent_item);
4162 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4164 btrfs_release_path(path);
4166 path->keep_locks = 1;
4167 path->search_for_split = 1;
4168 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4169 path->search_for_split = 0;
4174 leaf = path->nodes[0];
4175 /* if our item isn't there or got smaller, return now */
4176 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4179 /* the leaf has changed, it now has room. return now */
4180 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4183 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4184 fi = btrfs_item_ptr(leaf, path->slots[0],
4185 struct btrfs_file_extent_item);
4186 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4190 btrfs_set_path_blocking(path);
4191 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4195 path->keep_locks = 0;
4196 btrfs_unlock_up_safe(path, 1);
4199 path->keep_locks = 0;
4203 static noinline int split_item(struct btrfs_trans_handle *trans,
4204 struct btrfs_root *root,
4205 struct btrfs_path *path,
4206 struct btrfs_key *new_key,
4207 unsigned long split_offset)
4209 struct extent_buffer *leaf;
4210 struct btrfs_item *item;
4211 struct btrfs_item *new_item;
4217 struct btrfs_disk_key disk_key;
4219 leaf = path->nodes[0];
4220 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4222 btrfs_set_path_blocking(path);
4224 item = btrfs_item_nr(path->slots[0]);
4225 orig_offset = btrfs_item_offset(leaf, item);
4226 item_size = btrfs_item_size(leaf, item);
4228 buf = kmalloc(item_size, GFP_NOFS);
4232 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4233 path->slots[0]), item_size);
4235 slot = path->slots[0] + 1;
4236 nritems = btrfs_header_nritems(leaf);
4237 if (slot != nritems) {
4238 /* shift the items */
4239 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4240 btrfs_item_nr_offset(slot),
4241 (nritems - slot) * sizeof(struct btrfs_item));
4244 btrfs_cpu_key_to_disk(&disk_key, new_key);
4245 btrfs_set_item_key(leaf, &disk_key, slot);
4247 new_item = btrfs_item_nr(slot);
4249 btrfs_set_item_offset(leaf, new_item, orig_offset);
4250 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4252 btrfs_set_item_offset(leaf, item,
4253 orig_offset + item_size - split_offset);
4254 btrfs_set_item_size(leaf, item, split_offset);
4256 btrfs_set_header_nritems(leaf, nritems + 1);
4258 /* write the data for the start of the original item */
4259 write_extent_buffer(leaf, buf,
4260 btrfs_item_ptr_offset(leaf, path->slots[0]),
4263 /* write the data for the new item */
4264 write_extent_buffer(leaf, buf + split_offset,
4265 btrfs_item_ptr_offset(leaf, slot),
4266 item_size - split_offset);
4267 btrfs_mark_buffer_dirty(leaf);
4269 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4275 * This function splits a single item into two items,
4276 * giving 'new_key' to the new item and splitting the
4277 * old one at split_offset (from the start of the item).
4279 * The path may be released by this operation. After
4280 * the split, the path is pointing to the old item. The
4281 * new item is going to be in the same node as the old one.
4283 * Note, the item being split must be small enough to live alone on
4284 * a tree block with room for one extra struct btrfs_item
4286 * This allows us to split the item in place, keeping a lock on the
4287 * leaf the entire time.
4289 int btrfs_split_item(struct btrfs_trans_handle *trans,
4290 struct btrfs_root *root,
4291 struct btrfs_path *path,
4292 struct btrfs_key *new_key,
4293 unsigned long split_offset)
4296 ret = setup_leaf_for_split(trans, root, path,
4297 sizeof(struct btrfs_item));
4301 ret = split_item(trans, root, path, new_key, split_offset);
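/*
 * Usage sketch (tail_offset is hypothetical): cut the item under the
 * path in two at 'split_offset' bytes, giving the tail its own key:
 *
 *	struct btrfs_key new_key = orig_key;
 *
 *	new_key.offset = orig_key.offset + tail_offset;
 *	ret = btrfs_split_item(trans, root, path, &new_key, split_offset);
 *
 * On return the path still points at the now shorter original item.
 */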
4306 * This function duplicates an item, giving 'new_key' to the new item.
4307 * It guarantees both items live in the same tree leaf and the new item
4308 * is contiguous with the original item.
4310 * This allows us to split a file extent in place, keeping a lock on the
4311 * leaf the entire time.
4313 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4314 struct btrfs_root *root,
4315 struct btrfs_path *path,
4316 struct btrfs_key *new_key)
4318 struct extent_buffer *leaf;
4322 leaf = path->nodes[0];
4323 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4324 ret = setup_leaf_for_split(trans, root, path,
4325 item_size + sizeof(struct btrfs_item));
4330 setup_items_for_insert(root, path, new_key, &item_size,
4331 item_size, item_size +
4332 sizeof(struct btrfs_item), 1);
4333 leaf = path->nodes[0];
4334 memcpy_extent_buffer(leaf,
4335 btrfs_item_ptr_offset(leaf, path->slots[0]),
4336 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
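/*
 * Usage sketch (split_pos is hypothetical): clone a file extent item
 * so the two copies can afterwards be trimmed independently:
 *
 *	struct btrfs_key new_key = key;
 *
 *	new_key.offset = split_pos;
 *	ret = btrfs_duplicate_item(trans, root, path, &new_key);
 *
 * On success the path points at the new copy, one slot after the
 * original.
 */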
4342 * make the item pointed to by the path smaller. new_size indicates
4343 * how small to make it, and from_end tells us if we just chop bytes
4344 * off the end of the item or if we shift the item to chop bytes off the front
4347 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4348 u32 new_size, int from_end)
4351 struct extent_buffer *leaf;
4352 struct btrfs_item *item;
4354 unsigned int data_end;
4355 unsigned int old_data_start;
4356 unsigned int old_size;
4357 unsigned int size_diff;
4359 struct btrfs_map_token token;
4361 btrfs_init_map_token(&token);
4363 leaf = path->nodes[0];
4364 slot = path->slots[0];
4366 old_size = btrfs_item_size_nr(leaf, slot);
4367 if (old_size == new_size)
4370 nritems = btrfs_header_nritems(leaf);
4371 data_end = leaf_data_end(root, leaf);
4373 old_data_start = btrfs_item_offset_nr(leaf, slot);
4375 size_diff = old_size - new_size;
4378 BUG_ON(slot >= nritems);
4381 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4383 /* first correct the data pointers */
4384 for (i = slot; i < nritems; i++) {
4386 item = btrfs_item_nr(i);
4388 ioff = btrfs_token_item_offset(leaf, item, &token);
4389 btrfs_set_token_item_offset(leaf, item,
4390 ioff + size_diff, &token);
4393 /* shift the data */
4395 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4396 data_end + size_diff, btrfs_leaf_data(leaf) +
4397 data_end, old_data_start + new_size - data_end);
4399 struct btrfs_disk_key disk_key;
4402 btrfs_item_key(leaf, &disk_key, slot);
4404 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4406 struct btrfs_file_extent_item *fi;
4408 fi = btrfs_item_ptr(leaf, slot,
4409 struct btrfs_file_extent_item);
4410 fi = (struct btrfs_file_extent_item *)(
4411 (unsigned long)fi - size_diff);
4413 if (btrfs_file_extent_type(leaf, fi) ==
4414 BTRFS_FILE_EXTENT_INLINE) {
4415 ptr = btrfs_item_ptr_offset(leaf, slot);
4416 memmove_extent_buffer(leaf, ptr,
4418 offsetof(struct btrfs_file_extent_item,
4423 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4424 data_end + size_diff, btrfs_leaf_data(leaf) +
4425 data_end, old_data_start - data_end);
4427 offset = btrfs_disk_key_offset(&disk_key);
4428 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4429 btrfs_set_item_key(leaf, &disk_key, slot);
4431 fixup_low_keys(root, path, &disk_key, 1);
4434 item = btrfs_item_nr(slot);
4435 btrfs_set_item_size(leaf, item, new_size);
4436 btrfs_mark_buffer_dirty(leaf);
4438 if (btrfs_leaf_free_space(root, leaf) < 0) {
4439 btrfs_print_leaf(root, leaf);
4445 * make the item pointed to by the path bigger, data_size is the added size.
4447 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4451 struct extent_buffer *leaf;
4452 struct btrfs_item *item;
4454 unsigned int data_end;
4455 unsigned int old_data;
4456 unsigned int old_size;
4458 struct btrfs_map_token token;
4460 btrfs_init_map_token(&token);
4462 leaf = path->nodes[0];
4464 nritems = btrfs_header_nritems(leaf);
4465 data_end = leaf_data_end(root, leaf);
4467 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4468 btrfs_print_leaf(root, leaf);
4471 slot = path->slots[0];
4472 old_data = btrfs_item_end_nr(leaf, slot);
4475 if (slot >= nritems) {
4476 btrfs_print_leaf(root, leaf);
4477 printk(KERN_CRIT "slot %d too large, nritems %d\n",
4483 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4485 /* first correct the data pointers */
4486 for (i = slot; i < nritems; i++) {
4488 item = btrfs_item_nr(i);
4490 ioff = btrfs_token_item_offset(leaf, item, &token);
4491 btrfs_set_token_item_offset(leaf, item,
4492 ioff - data_size, &token);
4495 /* shift the data */
4496 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4497 data_end - data_size, btrfs_leaf_data(leaf) +
4498 data_end, old_data - data_end);
4500 data_end = old_data;
4501 old_size = btrfs_item_size_nr(leaf, slot);
4502 item = btrfs_item_nr(slot);
4503 btrfs_set_item_size(leaf, item, old_size + data_size);
4504 btrfs_mark_buffer_dirty(leaf);
4506 if (btrfs_leaf_free_space(root, leaf) < 0) {
4507 btrfs_print_leaf(root, leaf);
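/*
 * Usage sketch (names are illustrative): grow the item under the path
 * by 'extra' bytes and then fill in the new tail:
 *
 *	btrfs_extend_item(root, path, extra);
 *	leaf = path->nodes[0];
 *	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
 *	size = btrfs_item_size_nr(leaf, path->slots[0]);
 *	write_extent_buffer(leaf, data, ptr + size - extra, extra);
 *
 * btrfs_truncate_item() above is the inverse operation.
 */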
4513 * this is a helper for btrfs_insert_empty_items, the main goal here is
4514 * to save stack depth by doing the bulk of the work in a function
4515 * that doesn't call btrfs_search_slot
4517 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4518 struct btrfs_key *cpu_key, u32 *data_size,
4519 u32 total_data, u32 total_size, int nr)
4521 struct btrfs_item *item;
4524 unsigned int data_end;
4525 struct btrfs_disk_key disk_key;
4526 struct extent_buffer *leaf;
4528 struct btrfs_map_token token;
4530 btrfs_init_map_token(&token);
4532 leaf = path->nodes[0];
4533 slot = path->slots[0];
4535 nritems = btrfs_header_nritems(leaf);
4536 data_end = leaf_data_end(root, leaf);
4538 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4539 btrfs_print_leaf(root, leaf);
4540 printk(KERN_CRIT "not enough freespace need %u have %d\n",
4541 total_size, btrfs_leaf_free_space(root, leaf));
4545 if (slot != nritems) {
4546 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4548 if (old_data < data_end) {
4549 btrfs_print_leaf(root, leaf);
4550 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4551 slot, old_data, data_end);
4555 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4557 /* first correct the data pointers */
4558 for (i = slot; i < nritems; i++) {
4561 item = btrfs_item_nr(i);
4562 ioff = btrfs_token_item_offset(leaf, item, &token);
4563 btrfs_set_token_item_offset(leaf, item,
4564 ioff - total_data, &token);
4566 /* shift the items */
4567 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4568 btrfs_item_nr_offset(slot),
4569 (nritems - slot) * sizeof(struct btrfs_item));
4571 /* shift the data */
4572 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4573 data_end - total_data, btrfs_leaf_data(leaf) +
4574 data_end, old_data - data_end);
4575 data_end = old_data;
4578 /* setup the item for the new data */
4579 for (i = 0; i < nr; i++) {
4580 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4581 btrfs_set_item_key(leaf, &disk_key, slot + i);
4582 item = btrfs_item_nr(slot + i);
4583 btrfs_set_token_item_offset(leaf, item,
4584 data_end - data_size[i], &token);
4585 data_end -= data_size[i];
4586 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4589 btrfs_set_header_nritems(leaf, nritems + nr);
4592 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4593 fixup_low_keys(root, path, &disk_key, 1);
4595 btrfs_unlock_up_safe(path, 1);
4596 btrfs_mark_buffer_dirty(leaf);
4598 if (btrfs_leaf_free_space(root, leaf) < 0) {
4599 btrfs_print_leaf(root, leaf);
4605 * Given a key and some data, insert items into the tree.
4606 * This does all the path init required, making room in the tree if needed.
4608 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4609 struct btrfs_root *root,
4610 struct btrfs_path *path,
4611 struct btrfs_key *cpu_key, u32 *data_size,
4620 for (i = 0; i < nr; i++)
4621 total_data += data_size[i];
4623 total_size = total_data + (nr * sizeof(struct btrfs_item));
4624 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4630 slot = path->slots[0];
4633 setup_items_for_insert(root, path, cpu_key, data_size,
4634 total_data, total_size, nr);
4639 * Given a key and some data, insert an item into the tree.
4640 * This does all the path init required, making room in the tree if needed.
4642 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4643 *root, struct btrfs_key *cpu_key, void *data, u32
4647 struct btrfs_path *path;
4648 struct extent_buffer *leaf;
4651 path = btrfs_alloc_path();
4654 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4656 leaf = path->nodes[0];
4657 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4658 write_extent_buffer(leaf, data, ptr, data_size);
4659 btrfs_mark_buffer_dirty(leaf);
4661 btrfs_free_path(path);
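/*
 * Usage sketch (key fields and payload are placeholders): insert one
 * item whose contents were already laid out in 'buf':
 *
 *	struct btrfs_key key;
 *
 *	key.objectid = objectid;
 *	key.type = key_type;
 *	key.offset = offset;
 *	ret = btrfs_insert_item(trans, root, &key, buf, buf_len);
 */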
4666 * delete the pointer from a given node.
4668 * the tree should have been previously balanced so the deletion does not empty a node.
4671 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4672 int level, int slot)
4674 struct extent_buffer *parent = path->nodes[level];
4678 nritems = btrfs_header_nritems(parent);
4679 if (slot != nritems - 1) {
4681 tree_mod_log_eb_move(root->fs_info, parent, slot,
4682 slot + 1, nritems - slot - 1);
4683 memmove_extent_buffer(parent,
4684 btrfs_node_key_ptr_offset(slot),
4685 btrfs_node_key_ptr_offset(slot + 1),
4686 sizeof(struct btrfs_key_ptr) *
4687 (nritems - slot - 1));
4689 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4690 MOD_LOG_KEY_REMOVE, GFP_NOFS);
4695 btrfs_set_header_nritems(parent, nritems);
4696 if (nritems == 0 && parent == root->node) {
4697 BUG_ON(btrfs_header_level(root->node) != 1);
4698 /* just turn the root into a leaf and break */
4699 btrfs_set_header_level(root->node, 0);
4700 } else if (slot == 0) {
4701 struct btrfs_disk_key disk_key;
4703 btrfs_node_key(parent, &disk_key, 0);
4704 fixup_low_keys(root, path, &disk_key, level + 1);
4706 btrfs_mark_buffer_dirty(parent);
4710 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
4713 * This deletes the pointer in path->nodes[1] and frees the leaf
4714 * block extent. zero is returned if it all worked out, < 0 otherwise.
4716 * The path must have already been setup for deleting the leaf, including
4717 * all the proper balancing. path->nodes[1] must be locked.
4719 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4720 struct btrfs_root *root,
4721 struct btrfs_path *path,
4722 struct extent_buffer *leaf)
4724 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4725 del_ptr(root, path, 1, path->slots[1]);
4728 * btrfs_free_extent is expensive, we want to make sure we
4729 * aren't holding any locks when we call it
4731 btrfs_unlock_up_safe(path, 0);
4733 root_sub_used(root, leaf->len);
4735 extent_buffer_get(leaf);
4736 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4737 free_extent_buffer_stale(leaf);
4740 * delete 'nr' items starting at 'slot' at the leaf level in path. If
4741 * that empties the leaf, remove it from the tree.
4743 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4744 struct btrfs_path *path, int slot, int nr)
4746 struct extent_buffer *leaf;
4747 struct btrfs_item *item;
4748 u32 last_off;
4749 u32 dsize = 0;
4750 int ret = 0;
4751 int wret;
4752 int i;
4753 u32 nritems;
4754 struct btrfs_map_token token;
4756 btrfs_init_map_token(&token);
4758 leaf = path->nodes[0];
4759 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4761 for (i = 0; i < nr; i++)
4762 dsize += btrfs_item_size_nr(leaf, slot + i);
4764 nritems = btrfs_header_nritems(leaf);
4766 if (slot + nr != nritems) {
4767 int data_end = leaf_data_end(root, leaf);
4769 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4770 data_end + dsize,
4771 btrfs_leaf_data(leaf) + data_end,
4772 last_off - data_end);
4774 for (i = slot + nr; i < nritems; i++) {
4775 u32 ioff;
4777 item = btrfs_item_nr(i);
4778 ioff = btrfs_token_item_offset(leaf, item, &token);
4779 btrfs_set_token_item_offset(leaf, item,
4780 ioff + dsize, &token);
4783 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4784 btrfs_item_nr_offset(slot + nr),
4785 sizeof(struct btrfs_item) *
4786 (nritems - slot - nr));
4788 btrfs_set_header_nritems(leaf, nritems - nr);
4789 nritems -= nr;
4791 /* delete the leaf if we've emptied it */
4792 if (nritems == 0) {
4793 if (leaf == root->node) {
4794 btrfs_set_header_level(leaf, 0);
4795 } else {
4796 btrfs_set_path_blocking(path);
4797 clean_tree_block(trans, root, leaf);
4798 btrfs_del_leaf(trans, root, path, leaf);
4799 }
4800 } else {
4801 int used = leaf_space_used(leaf, 0, nritems);
4802 if (slot == 0) {
4803 struct btrfs_disk_key disk_key;
4805 btrfs_item_key(leaf, &disk_key, 0);
4806 fixup_low_keys(root, path, &disk_key, 1);
4807 }
4809 /* delete the leaf if it is mostly empty */
4810 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
4811 /* push_leaf_left fixes the path.
4812 * make sure the path still points to our leaf
4813 * for possible call to del_ptr below
4815 slot = path->slots[1];
4816 extent_buffer_get(leaf);
4818 btrfs_set_path_blocking(path);
4819 wret = push_leaf_left(trans, root, path, 1, 1,
4820 1, (u32)-1);
4821 if (wret < 0 && wret != -ENOSPC)
4822 ret = wret;
4824 if (path->nodes[0] == leaf &&
4825 btrfs_header_nritems(leaf)) {
4826 wret = push_leaf_right(trans, root, path, 1,
4827 1, 1, 0);
4828 if (wret < 0 && wret != -ENOSPC)
4829 ret = wret;
4830 }
4832 if (btrfs_header_nritems(leaf) == 0) {
4833 path->slots[1] = slot;
4834 btrfs_del_leaf(trans, root, path, leaf);
4835 free_extent_buffer(leaf);
4836 ret = 0;
4837 } else {
4838 /* if we're still in the path, make sure
4839 * we're dirty. Otherwise, one of the
4840 * push_leaf functions must have already
4841 * dirtied this buffer
4843 if (path->nodes[0] == leaf)
4844 btrfs_mark_buffer_dirty(leaf);
4845 free_extent_buffer(leaf);
4846 }
4847 } else {
4848 btrfs_mark_buffer_dirty(leaf);
4849 }
4850 }
4851 return ret;
4852 }
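/*
 * Editor's illustration, not part of the original file: the usual calling
 * pattern for btrfs_del_items(). ins_len is -1, so btrfs_search_slot()
 * pre-balances the path for a deletion, which is what the "proper
 * balancing" requirement above refers to. The key is a placeholder.
 */
static int example_delete_one_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* cow = 1: we intend to modify the leaf */
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;	/* exact key was not found */

	btrfs_free_path(path);
	return ret;
}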
4855 * search the tree again to find a leaf with lesser keys.
4856 * returns 0 if it found something or 1 if there are no lesser leaves.
4857 * returns < 0 on io errors.
4859 * This may release the path, and so you may lose any locks held at the
4860 * time you call it.
4862 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4864 struct btrfs_key key;
4865 struct btrfs_disk_key found_key;
4866 int ret;
4868 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4870 if (key.offset > 0) {
4871 key.offset--;
4872 } else if (key.type > 0) {
4873 key.type--;
4874 key.offset = (u64)-1;
4875 } else if (key.objectid > 0) {
4876 key.objectid--;
4877 key.type = (u8)-1;
4878 key.offset = (u64)-1;
4879 } else {
4880 return 1;
4881 }
4883 btrfs_release_path(path);
4884 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4885 if (ret < 0)
4886 return ret;
4887 btrfs_item_key(path->nodes[0], &found_key, 0);
4888 ret = comp_keys(&found_key, &key);
4889 if (ret < 0)
4890 return 0;
4891 return 1;
4892 }
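/*
 * Editor's illustration, not part of the original file: walking leaves
 * backwards with btrfs_prev_leaf(). The starting key and the per-leaf
 * processing step are placeholders.
 */
static int example_walk_leaves_backwards(struct btrfs_root *root,
					 struct btrfs_key *start)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, start, path, 0, 0);
	while (ret >= 0) {
		/* ... examine path->nodes[0] here ... */
		ret = btrfs_prev_leaf(root, path);
		if (ret > 0) {		/* no lesser leaves left */
			ret = 0;
			break;
		}
	}
	btrfs_free_path(path);
	return ret;
}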
4895 * A helper function to walk down the tree starting at min_key, and looking
4896 * for nodes or leaves that have a minimum transaction id.
4897 * This is used by the btree defrag code, and tree logging.
4899 * This does not cow, but it does stuff the starting key it finds back
4900 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4901 * key and get a writable path.
4903 * This does lock as it descends, and path->keep_locks should be set
4904 * to 1 by the caller.
4906 * This honors path->lowest_level to prevent descent past a given level
4907 * of the tree.
4909 * min_trans indicates the oldest transaction that you are interested
4910 * in walking through. Any nodes or leaves older than min_trans are
4911 * skipped over (without reading them).
4913 * returns zero if something useful was found, < 0 on error and 1 if there
4914 * was nothing in the tree that matched the search criteria.
4916 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4917 struct btrfs_path *path,
4920 struct extent_buffer *cur;
4921 struct btrfs_key found_key;
4922 int slot;
4923 int sret;
4924 u32 nritems;
4925 int level;
4926 int ret = 1;
4928 WARN_ON(!path->keep_locks);
4929 again:
4930 cur = btrfs_read_lock_root_node(root);
4931 level = btrfs_header_level(cur);
4932 WARN_ON(path->nodes[level]);
4933 path->nodes[level] = cur;
4934 path->locks[level] = BTRFS_READ_LOCK;
4936 if (btrfs_header_generation(cur) < min_trans) {
4937 ret = 1;
4938 goto out;
4939 }
4940 while (1) {
4941 nritems = btrfs_header_nritems(cur);
4942 level = btrfs_header_level(cur);
4943 sret = bin_search(cur, min_key, level, &slot);
4945 /* at the lowest level, we're done, setup the path and exit */
4946 if (level == path->lowest_level) {
4947 if (slot >= nritems)
4948 goto find_next_key;
4949 ret = 0;
4950 path->slots[level] = slot;
4951 btrfs_item_key_to_cpu(cur, &found_key, slot);
4952 goto out;
4953 }
4954 if (sret && slot > 0)
4955 slot--;
4957 * check this node pointer against the min_trans parameters.
4958 * If it is too old, skip to the next one.
4960 while (slot < nritems) {
4961 u64 gen;
4963 gen = btrfs_node_ptr_generation(cur, slot);
4964 if (gen < min_trans) {
4965 slot++;
4966 continue;
4967 }
4968 break;
4969 }
4970 find_next_key:
4972 * we didn't find a candidate key in this node, walk forward
4973 * and find another one
4975 if (slot >= nritems) {
4976 path->slots[level] = slot;
4977 btrfs_set_path_blocking(path);
4978 sret = btrfs_find_next_key(root, path, min_key, level,
4979 min_trans);
4980 if (sret == 0) {
4981 btrfs_release_path(path);
4982 goto again;
4983 } else {
4984 goto out;
4985 }
4987 /* save our key for returning back */
4988 btrfs_node_key_to_cpu(cur, &found_key, slot);
4989 path->slots[level] = slot;
4990 if (level == path->lowest_level) {
4991 ret = 0;
4992 unlock_up(path, level, 1, 0, NULL);
4993 goto out;
4994 }
4995 btrfs_set_path_blocking(path);
4996 cur = read_node_slot(root, cur, slot);
4997 BUG_ON(!cur); /* -ENOMEM */
4999 btrfs_tree_read_lock(cur);
5001 path->locks[level - 1] = BTRFS_READ_LOCK;
5002 path->nodes[level - 1] = cur;
5003 unlock_up(path, level, 1, 0, NULL);
5004 btrfs_clear_path_blocking(path, NULL, 0);
5005 }
5006 out:
5007 if (ret == 0)
5008 memcpy(min_key, &found_key, sizeof(found_key));
5009 btrfs_set_path_blocking(path);
5010 return ret;
5011 }
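/*
 * Editor's illustration, not part of the original file: the defrag/logging
 * style loop that the comment above btrfs_search_forward() describes. Only
 * leaves newer than min_trans are visited; the per-leaf processing step is
 * a placeholder, and the simple offset bump is an assumed advance strategy.
 */
static int example_visit_newer_leaves(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->keep_locks = 1;	/* btrfs_search_forward() insists on this */

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret)	/* 1: nothing left, < 0: error */
			break;

		/* min_key was stuffed with the key that was found */
		/* ... process path->nodes[0] here ... */

		btrfs_release_path(path);
		if (min_key.offset == (u64)-1)
			break;
		min_key.offset++;	/* simplistic advance past the hit */
	}
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}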
5013 static void tree_move_down(struct btrfs_root *root,
5014 struct btrfs_path *path,
5015 int *level, int root_level)
5017 BUG_ON(*level == 0);
5018 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
5019 path->slots[*level]);
5020 path->slots[*level - 1] = 0;
5021 (*level)--;
5022 }
5024 static int tree_move_next_or_upnext(struct btrfs_root *root,
5025 struct btrfs_path *path,
5026 int *level, int root_level)
5030 nritems = btrfs_header_nritems(path->nodes[*level]);
5032 path->slots[*level]++;
5034 while (path->slots[*level] >= nritems) {
5035 if (*level == root_level)
5036 return -1;
5038 /* move upnext */
5039 path->slots[*level] = 0;
5040 free_extent_buffer(path->nodes[*level]);
5041 path->nodes[*level] = NULL;
5042 (*level)++;
5043 path->slots[*level]++;
5045 nritems = btrfs_header_nritems(path->nodes[*level]);
5046 ret = 1;
5047 }
5048 return ret;
5049 }
5052 * Returns 1 if it had to move up and next. 0 is returned if it moved only
5053 * next. < 0 is returned when there was nothing left to advance to.
5055 static int tree_advance(struct btrfs_root *root,
5056 struct btrfs_path *path,
5057 int *level, int root_level,
5059 struct btrfs_key *key)
5063 if (*level == 0 || !allow_down) {
5064 ret = tree_move_next_or_upnext(root, path, level, root_level);
5065 } else {
5066 tree_move_down(root, path, level, root_level);
5067 ret = 0;
5068 }
5069 if (ret >= 0) {
5070 if (*level == 0)
5071 btrfs_item_key_to_cpu(path->nodes[*level], key,
5072 path->slots[*level]);
5073 else
5074 btrfs_node_key_to_cpu(path->nodes[*level], key,
5075 path->slots[*level]);
5076 }
5077 return ret;
5078 }
5080 static int tree_compare_item(struct btrfs_root *left_root,
5081 struct btrfs_path *left_path,
5082 struct btrfs_path *right_path,
5083 char *tmp_buf)
5085 int cmp;
5086 int len1, len2;
5087 unsigned long off1, off2;
5089 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5090 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5091 if (len1 != len2)
5092 return 1;
5094 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5095 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5096 right_path->slots[0]);
5098 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5100 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5101 if (cmp)
5102 return 1;
5103 return 0;
5104 }
5106 #define ADVANCE 1
5107 #define ADVANCE_ONLY_NEXT -1
5110 * This function compares two trees and calls the provided callback for
5111 * every changed/new/deleted item it finds.
5112 * If shared tree blocks are encountered, whole subtrees are skipped, making
5113 * the compare pretty fast on snapshotted subvolumes.
5115 * This currently works on commit roots only. As commit roots are read only,
5116 * we don't do any locking. The commit roots are protected with transactions.
5117 * Transactions are ended and rejoined when a commit is tried in between.
5119 * This function checks for modifications done to the trees while comparing.
5120 * If it detects a change, it aborts immediately.
5122 int btrfs_compare_trees(struct btrfs_root *left_root,
5123 struct btrfs_root *right_root,
5124 btrfs_changed_cb_t changed_cb, void *ctx)
5126 int ret;
5127 int cmp;
5128 struct btrfs_trans_handle *trans = NULL;
5129 struct btrfs_path *left_path = NULL;
5130 struct btrfs_path *right_path = NULL;
5131 struct btrfs_key left_key;
5132 struct btrfs_key right_key;
5133 char *tmp_buf = NULL;
5134 int left_root_level;
5135 int right_root_level;
5136 int left_level;
5137 int right_level;
5138 int left_end_reached;
5139 int right_end_reached;
5140 int advance_left;
5141 int advance_right;
5142 u64 left_blockptr;
5143 u64 right_blockptr;
5144 u64 left_start_ctransid;
5145 u64 right_start_ctransid;
5146 u64 ctransid;
5148 left_path = btrfs_alloc_path();
5149 if (!left_path) {
5150 ret = -ENOMEM;
5151 goto out;
5152 }
5153 right_path = btrfs_alloc_path();
5154 if (!right_path) {
5155 ret = -ENOMEM;
5156 goto out;
5157 }
5159 tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
5160 if (!tmp_buf) {
5161 ret = -ENOMEM;
5162 goto out;
5163 }
5165 left_path->search_commit_root = 1;
5166 left_path->skip_locking = 1;
5167 right_path->search_commit_root = 1;
5168 right_path->skip_locking = 1;
5170 spin_lock(&left_root->root_item_lock);
5171 left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
5172 spin_unlock(&left_root->root_item_lock);
5174 spin_lock(&right_root->root_item_lock);
5175 right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
5176 spin_unlock(&right_root->root_item_lock);
5178 trans = btrfs_join_transaction(left_root);
5179 if (IS_ERR(trans)) {
5180 ret = PTR_ERR(trans);
5181 trans = NULL;
5182 goto out;
5183 }
5186 * Strategy: Go to the first items of both trees. Then do
5188 * If both trees are at level 0
5189 * Compare keys of current items
5190 * If left < right treat left item as new, advance left tree
5191 * and repeat
5192 * If left > right treat right item as deleted, advance right tree
5193 * and repeat
5194 * If left == right do deep compare of items, treat as changed if
5195 * needed, advance both trees and repeat
5196 * If both trees are at the same level but not at level 0
5197 * Compare keys of current nodes/leaves
5198 * If left < right advance left tree and repeat
5199 * If left > right advance right tree and repeat
5200 * If left == right compare blockptrs of the next nodes/leaves
5201 * If they match advance both trees but stay at the same level
5202 * and repeat
5203 * If they don't match advance both trees while allowing to go
5204 * deeper and repeat
5205 * If tree levels are different
5206 * Advance the tree that needs it and repeat
5208 * Advancing a tree means:
5209 * If we are at level 0, try to go to the next slot. If that's not
5210 * possible, go one level up and repeat. Stop when we found a level
5211 * where we could go to the next slot. We may at this point be on a
5212 * node or a leaf.
5214 * If we are not at level 0 and not on shared tree blocks, go one
5215 * level deeper.
5217 * If we are not at level 0 and on shared tree blocks, go one slot to
5218 * the right if possible or go up and right.
5221 left_level = btrfs_header_level(left_root->commit_root);
5222 left_root_level = left_level;
5223 left_path->nodes[left_level] = left_root->commit_root;
5224 extent_buffer_get(left_path->nodes[left_level]);
5226 right_level = btrfs_header_level(right_root->commit_root);
5227 right_root_level = right_level;
5228 right_path->nodes[right_level] = right_root->commit_root;
5229 extent_buffer_get(right_path->nodes[right_level]);
5231 if (left_level == 0)
5232 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5233 &left_key, left_path->slots[left_level]);
5234 else
5235 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5236 &left_key, left_path->slots[left_level]);
5237 if (right_level == 0)
5238 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5239 &right_key, right_path->slots[right_level]);
5240 else
5241 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5242 &right_key, right_path->slots[right_level]);
5244 left_end_reached = right_end_reached = 0;
5245 advance_left = advance_right = 0;
5249 * We need to make sure the transaction does not get committed
5250 * while we do anything on commit roots. This means we need to
5251 * join and leave transactions for every item that we process.
5253 if (trans && btrfs_should_end_transaction(trans, left_root)) {
5254 btrfs_release_path(left_path);
5255 btrfs_release_path(right_path);
5257 ret = btrfs_end_transaction(trans, left_root);
5258 trans = NULL;
5259 if (ret < 0)
5260 goto out;
5261 }
5262 /* now rejoin the transaction */
5263 if (!trans) {
5264 trans = btrfs_join_transaction(left_root);
5265 if (IS_ERR(trans)) {
5266 ret = PTR_ERR(trans);
5267 trans = NULL;
5268 goto out;
5269 }
5270 }
5271 spin_lock(&left_root->root_item_lock);
5272 ctransid = btrfs_root_ctransid(&left_root->root_item);
5273 spin_unlock(&left_root->root_item_lock);
5274 if (ctransid != left_start_ctransid)
5275 left_start_ctransid = 0;
5277 spin_lock(&right_root->root_item_lock);
5278 ctransid = btrfs_root_ctransid(&right_root->root_item);
5279 spin_unlock(&right_root->root_item_lock);
5280 if (ctransid != right_start_ctransid)
5281 right_start_ctransid = 0;
5283 if (!left_start_ctransid || !right_start_ctransid) {
5284 WARN(1, KERN_WARNING
5285 "btrfs: btrfs_compare_tree detected "
5286 "a change in one of the trees while "
5287 "iterating. This is probably a "
5294 * the commit root may have changed, so start again
5295 * where we stopped
5297 left_path->lowest_level = left_level;
5298 right_path->lowest_level = right_level;
5299 ret = btrfs_search_slot(NULL, left_root,
5300 &left_key, left_path, 0, 0);
5301 if (ret < 0)
5302 goto out;
5303 ret = btrfs_search_slot(NULL, right_root,
5304 &right_key, right_path, 0, 0);
5305 if (ret < 0)
5306 goto out;
5309 if (advance_left && !left_end_reached) {
5310 ret = tree_advance(left_root, left_path, &left_level,
5311 left_root_level,
5312 advance_left != ADVANCE_ONLY_NEXT,
5313 &left_key);
5314 if (ret < 0)
5315 left_end_reached = ADVANCE;
5318 if (advance_right && !right_end_reached) {
5319 ret = tree_advance(right_root, right_path, &right_level,
5320 right_root_level,
5321 advance_right != ADVANCE_ONLY_NEXT,
5322 &right_key);
5323 if (ret < 0)
5324 right_end_reached = ADVANCE;
5328 if (left_end_reached && right_end_reached) {
5329 ret = 0;
5330 goto out;
5331 } else if (left_end_reached) {
5332 if (right_level == 0) {
5333 ret = changed_cb(left_root, right_root,
5334 left_path, right_path,
5335 &right_key,
5336 BTRFS_COMPARE_TREE_DELETED,
5337 ctx);
5338 if (ret < 0)
5339 goto out;
5340 }
5341 advance_right = ADVANCE;
5342 continue;
5343 } else if (right_end_reached) {
5344 if (left_level == 0) {
5345 ret = changed_cb(left_root, right_root,
5346 left_path, right_path,
5347 &left_key,
5348 BTRFS_COMPARE_TREE_NEW,
5349 ctx);
5350 if (ret < 0)
5351 goto out;
5352 }
5353 advance_left = ADVANCE;
5354 continue;
5355 }
5357 if (left_level == 0 && right_level == 0) {
5358 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5359 if (cmp < 0) {
5360 ret = changed_cb(left_root, right_root,
5361 left_path, right_path,
5362 &left_key,
5363 BTRFS_COMPARE_TREE_NEW,
5364 ctx);
5365 if (ret < 0)
5366 goto out;
5367 advance_left = ADVANCE;
5368 } else if (cmp > 0) {
5369 ret = changed_cb(left_root, right_root,
5370 left_path, right_path,
5371 &right_key,
5372 BTRFS_COMPARE_TREE_DELETED,
5373 ctx);
5374 if (ret < 0)
5375 goto out;
5376 advance_right = ADVANCE;
5377 } else {
5378 enum btrfs_compare_tree_result cmp;
5380 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5381 ret = tree_compare_item(left_root, left_path,
5382 right_path, tmp_buf);
5383 if (ret)
5384 cmp = BTRFS_COMPARE_TREE_CHANGED;
5385 else
5386 cmp = BTRFS_COMPARE_TREE_SAME;
5387 ret = changed_cb(left_root, right_root,
5388 left_path, right_path,
5389 &left_key, cmp, ctx);
5390 if (ret < 0)
5391 goto out;
5392 advance_left = ADVANCE;
5393 advance_right = ADVANCE;
5395 } else if (left_level == right_level) {
5396 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5397 if (cmp < 0) {
5398 advance_left = ADVANCE;
5399 } else if (cmp > 0) {
5400 advance_right = ADVANCE;
5401 } else {
5402 left_blockptr = btrfs_node_blockptr(
5403 left_path->nodes[left_level],
5404 left_path->slots[left_level]);
5405 right_blockptr = btrfs_node_blockptr(
5406 right_path->nodes[right_level],
5407 right_path->slots[right_level]);
5408 if (left_blockptr == right_blockptr) {
5410 * As we're on a shared block, don't
5411 * allow to go deeper.
5413 advance_left = ADVANCE_ONLY_NEXT;
5414 advance_right = ADVANCE_ONLY_NEXT;
5415 } else {
5416 advance_left = ADVANCE;
5417 advance_right = ADVANCE;
5420 } else if (left_level < right_level) {
5421 advance_right = ADVANCE;
5422 } else {
5423 advance_left = ADVANCE;
5424 }
5425 }
5427 out:
5428 btrfs_free_path(left_path);
5429 btrfs_free_path(right_path);
5430 kfree(tmp_buf);
5432 if (trans) {
5433 if (!ret)
5434 ret = btrfs_end_transaction(trans, left_root);
5435 else
5436 btrfs_end_transaction(trans, left_root);
5437 }
5439 return ret;
5440 }
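/*
 * Editor's illustration, not part of the original file: a minimal consumer
 * of btrfs_compare_trees(). The callback just tallies the kinds of
 * differences seen; struct example_diff_stats and both function names are
 * hypothetical.
 */
struct example_diff_stats {
	u64 new_items;
	u64 deleted;
	u64 changed;
};

static int example_changed_cb(struct btrfs_root *left_root,
			      struct btrfs_root *right_root,
			      struct btrfs_path *left_path,
			      struct btrfs_path *right_path,
			      struct btrfs_key *key,
			      enum btrfs_compare_tree_result result,
			      void *ctx)
{
	struct example_diff_stats *stats = ctx;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		stats->new_items++;
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		stats->deleted++;
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		stats->changed++;
		break;
	default:	/* BTRFS_COMPARE_TREE_SAME */
		break;
	}
	return 0;	/* a negative return would abort the compare */
}

/*
 * A caller would then do something like:
 *
 *	struct example_diff_stats stats = { 0 };
 *	ret = btrfs_compare_trees(snap1_root, snap2_root,
 *				  example_changed_cb, &stats);
 */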
5443 * this is similar to btrfs_next_leaf, but does not try to preserve
5444 * and fixup the path. It looks for and returns the next key in the
5445 * tree based on the current path and the min_trans parameters.
5447 * 0 is returned if another key is found, < 0 if there are any errors
5448 * and 1 is returned if there are no higher keys in the tree
5450 * path->keep_locks should be set to 1 on the search made before
5451 * calling this function.
5453 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5454 struct btrfs_key *key, int level, u64 min_trans)
5456 int slot;
5457 struct extent_buffer *c;
5459 WARN_ON(!path->keep_locks);
5460 while (level < BTRFS_MAX_LEVEL) {
5461 if (!path->nodes[level])
5462 return 1;
5464 slot = path->slots[level] + 1;
5465 c = path->nodes[level];
5466 next:
5467 if (slot >= btrfs_header_nritems(c)) {
5468 int ret;
5469 int orig_lowest;
5470 struct btrfs_key cur_key;
5471 if (level + 1 >= BTRFS_MAX_LEVEL ||
5472 !path->nodes[level + 1])
5473 return 1;
5475 if (path->locks[level + 1]) {
5476 level++;
5477 continue;
5478 }
5480 slot = btrfs_header_nritems(c) - 1;
5481 if (level == 0)
5482 btrfs_item_key_to_cpu(c, &cur_key, slot);
5483 else
5484 btrfs_node_key_to_cpu(c, &cur_key, slot);
5486 orig_lowest = path->lowest_level;
5487 btrfs_release_path(path);
5488 path->lowest_level = level;
5489 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5490 0, 0);
5491 path->lowest_level = orig_lowest;
5492 if (ret < 0)
5493 return ret;
5495 c = path->nodes[level];
5496 slot = path->slots[level];
5497 goto next;
5498 }
5503 btrfs_item_key_to_cpu(c, key, slot);
5505 u64 gen = btrfs_node_ptr_generation(c, slot);
5507 if (gen < min_trans) {
5508 slot++;
5509 goto next;
5510 }
5511 btrfs_node_key_to_cpu(c, key, slot);
5512 }
5513 return 0;
5514 }
5515 return 1;
5516 }
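/*
 * Editor's illustration, not part of the original file: peeking at the key
 * that follows a located item, honoring the keep_locks rule stated above.
 * The function name is hypothetical.
 */
static int example_peek_following_key(struct btrfs_root *root,
				      struct btrfs_key *search_key,
				      struct btrfs_key *next_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->keep_locks = 1;	/* must be set before the search */

	ret = btrfs_search_slot(NULL, root, search_key, path, 0, 0);
	if (ret >= 0)
		/* level 0, min_trans 0: any following leaf key qualifies */
		ret = btrfs_find_next_key(root, path, next_key, 0, 0);

	btrfs_free_path(path);
	return ret;
}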
5519 * search the tree again to find a leaf with greater keys
5520 * returns 0 if it found something or 1 if there are no greater leaves.
5521 * returns < 0 on io errors.
5523 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5525 return btrfs_next_old_leaf(root, path, 0);
5526 }
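/*
 * Editor's illustration, not part of the original file: the canonical
 * forward scan built on btrfs_next_leaf(). The per-item work is a
 * placeholder.
 */
static int example_scan_forward(struct btrfs_root *root,
				struct btrfs_key *first)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key found;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, first, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: end of tree, < 0: error */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found, path->slots[0]);
		/* ... process the item at path->slots[0] here ... */
		path->slots[0]++;
	}
	if (ret > 0)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}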
5528 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5529 u64 time_seq)
5531 int slot;
5532 int level;
5533 struct extent_buffer *c;
5534 struct extent_buffer *next;
5535 struct btrfs_key key;
5536 u32 nritems;
5537 int ret;
5538 int old_spinning = path->leave_spinning;
5539 int next_rw_lock = 0;
5541 nritems = btrfs_header_nritems(path->nodes[0]);
5542 if (nritems == 0)
5543 return 1;
5545 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5546 again:
5547 level = 1;
5548 next = NULL;
5549 next_rw_lock = 0;
5550 btrfs_release_path(path);
5552 path->keep_locks = 1;
5553 path->leave_spinning = 1;
5555 if (time_seq)
5556 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5557 else
5558 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5559 path->keep_locks = 0;
5561 if (ret < 0)
5562 return ret;
5564 nritems = btrfs_header_nritems(path->nodes[0]);
5566 * by releasing the path above we dropped all our locks. A balance
5567 * could have added more items next to the key that used to be
5568 * at the very end of the block. So, check again here and
5569 * advance the path if there are now more items available.
5571 if (nritems > 0 && path->slots[0] < nritems - 1) {
5572 path->slots[0]++;
5573 ret = 0;
5574 goto done;
5575 }
5578 while (level < BTRFS_MAX_LEVEL) {
5579 if (!path->nodes[level]) {
5580 ret = 1;
5581 goto done;
5582 }
5584 slot = path->slots[level] + 1;
5585 c = path->nodes[level];
5586 if (slot >= btrfs_header_nritems(c)) {
5587 level++;
5588 if (level == BTRFS_MAX_LEVEL) {
5589 ret = 1;
5590 goto done;
5591 }
5592 continue;
5593 }
5595 if (next) {
5596 btrfs_tree_unlock_rw(next, next_rw_lock);
5597 free_extent_buffer(next);
5598 }
5601 next_rw_lock = path->locks[level];
5602 ret = read_block_for_search(NULL, root, path, &next, level,
5603 slot, &key, 0);
5604 if (ret == -EAGAIN)
5605 goto again;
5607 if (ret < 0) {
5608 btrfs_release_path(path);
5609 goto done;
5610 }
5612 if (!path->skip_locking) {
5613 ret = btrfs_try_tree_read_lock(next);
5614 if (!ret && time_seq) {
5616 * If we don't get the lock, we may be racing
5617 * with push_leaf_left, holding that lock while
5618 * itself waiting for the leaf we've currently
5619 * locked. To solve this situation, we give up
5620 * on our lock and cycle.
5622 free_extent_buffer(next);
5623 btrfs_release_path(path);
5624 cond_resched();
5625 goto again;
5626 }
5627 if (!ret) {
5628 btrfs_set_path_blocking(path);
5629 btrfs_tree_read_lock(next);
5630 btrfs_clear_path_blocking(path, next,
5631 BTRFS_READ_LOCK);
5632 }
5633 next_rw_lock = BTRFS_READ_LOCK;
5637 path->slots[level] = slot;
5638 while (1) {
5639 level--;
5640 c = path->nodes[level];
5641 if (path->locks[level])
5642 btrfs_tree_unlock_rw(c, path->locks[level]);
5644 free_extent_buffer(c);
5645 path->nodes[level] = next;
5646 path->slots[level] = 0;
5647 if (!path->skip_locking)
5648 path->locks[level] = next_rw_lock;
5649 if (!level)
5650 break;
5652 ret = read_block_for_search(NULL, root, path, &next, level,
5653 0, &key, 0);
5654 if (ret == -EAGAIN)
5655 goto again;
5657 if (ret < 0) {
5658 btrfs_release_path(path);
5659 goto done;
5660 }
5662 if (!path->skip_locking) {
5663 ret = btrfs_try_tree_read_lock(next);
5664 if (!ret) {
5665 btrfs_set_path_blocking(path);
5666 btrfs_tree_read_lock(next);
5667 btrfs_clear_path_blocking(path, next,
5668 BTRFS_READ_LOCK);
5669 }
5670 next_rw_lock = BTRFS_READ_LOCK;
5671 }
5672 }
5673 ret = 0;
5674 done:
5675 unlock_up(path, 0, 1, 0, NULL);
5676 path->leave_spinning = old_spinning;
5677 if (!old_spinning)
5678 btrfs_set_path_blocking(path);
5680 return ret;
5681 }
5684 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5685 * searching until it gets past min_objectid or finds an item of 'type'.
5687 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5689 int btrfs_previous_item(struct btrfs_root *root,
5690 struct btrfs_path *path, u64 min_objectid,
5691 int type)
5693 struct btrfs_key found_key;
5694 struct extent_buffer *leaf;
5695 u32 nritems;
5696 int ret;
5698 while (1) {
5699 if (path->slots[0] == 0) {
5700 btrfs_set_path_blocking(path);
5701 ret = btrfs_prev_leaf(root, path);
5702 if (ret != 0)
5703 return ret;
5704 } else {
5705 path->slots[0]--;
5706 }
5707 leaf = path->nodes[0];
5708 nritems = btrfs_header_nritems(leaf);
5709 if (nritems == 0)
5710 return 1;
5711 if (path->slots[0] == nritems)
5712 path->slots[0]--;
5714 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5715 if (found_key.objectid < min_objectid)
5716 break;
5717 if (found_key.type == type)
5718 return 0;
5719 if (found_key.objectid == min_objectid &&
5720 found_key.type < type)
5721 break;