fs/btrfs/delayed-ref.c
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

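/*
 * slab caches for the four delayed ref structures below; they are
 * created in btrfs_delayed_ref_init() and torn down again in
 * btrfs_delayed_ref_exit().
 */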
struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

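/*
 * A rough sketch of how a caller typically queues a ref (the real
 * callers live mostly in extent-tree.c; error handling omitted):
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans,
 *					 buf->start, buf->len,
 *					 parent, root->root_key.objectid,
 *					 level, BTRFS_ADD_DELAYED_REF,
 *					 NULL, for_cow);
 *
 * The ref then sits in the per-transaction rbtrees until the delayed
 * ref machinery runs it against the extent allocation tree.
 */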
/*
 * compare two delayed tree backrefs with the same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with the same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1,
		      bool compare_seq)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	/* the ref head sorts after all the refs for the same extent */
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (compare_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1),
				      ref1->type);
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}

/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins, 1);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      struct btrfs_delayed_ref_head **last, int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;
	int cmp = 0;

again:
	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
		if (last)
			*last = entry;

		if (bytenr < entry->node.bytenr)
			cmp = -1;
		else if (bytenr > entry->node.bytenr)
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (cmp > 0) {
			n = rb_next(&entry->href_node);
			/* if we ran off the end, wrap around to the start */
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			bytenr = entry->node.bytenr;
			return_bigger = 0;
			goto again;
		}
		return entry;
	}
	return NULL;
}

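/*
 * take the mutex on a delayed ref head.  If the trylock fails, we drop
 * delayed_refs->lock, sleep on the mutex and then retake the spinlock.
 * Returns -EAGAIN if the head was run and removed from the tree while
 * we slept, 0 once the mutex is held.
 */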
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

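/*
 * remove a ref (or a ref head) from whichever rbtree it lives in and
 * drop the reference the tree held on it.  The caller must hold
 * delayed_refs->lock for head refs, or head->lock for regular refs.
 */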
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		rb_erase(&ref->rb_node, &head->ref_root);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

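/*
 * try to fold a ref together with the refs that follow it in the rbtree.
 * Matching adds and drops cancel each other out, and a ref whose ref_mod
 * reaches zero is removed entirely.  Refs at or past the given seq are
 * left alone.  Returns 1 if the ref we started from was itself removed,
 * 0 otherwise.
 */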
static int merge_ref(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_root *delayed_refs,
		     struct btrfs_delayed_ref_head *head,
		     struct btrfs_delayed_ref_node *ref, u64 seq)
{
	struct rb_node *node;
	int mod = 0;
	int done = 0;

	node = rb_next(&ref->rb_node);
	while (!done && node) {
		struct btrfs_delayed_ref_node *next;

		next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_entry(ref, next, 0))
			continue;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				struct btrfs_delayed_ref_node *tmp;

				/* keep the ref with the bigger ref_mod */
				tmp = ref;
				ref = next;
				next = tmp;
				done = 1;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = 1;
		} else {
			/*
			 * You can't have multiples of the same ref on a tree
			 * block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}
	return done;
}

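/*
 * merge as many refs under a head as possible.  Data refs are skipped
 * since there are rarely enough of them to make the walk worthwhile,
 * and refs still visible to an active tree mod log user are never
 * merged.
 */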
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	u64 seq = 0;

	assert_spin_locked(&head->lock);
	/*
	 * We don't have too many refs to merge in the case of delayed data
	 * refs.
	 */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	node = rb_first(&head->ref_root);
	while (node) {
		struct btrfs_delayed_ref_node *ref;

		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		/* We can't merge refs that are outside of our seq count */
		if (seq && ref->seq >= seq)
			break;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			node = rb_first(&head->ref_root);
		else
			node = rb_next(&ref->rb_node);
	}
}

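/*
 * returns 1 if the delayed ref with the given seq must be held back
 * because an older tree mod log user still needs to see it, 0 if it is
 * safe to process.
 */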
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
				 (u32)(seq >> 32), (u32)seq,
				 (u32)(elem->seq >> 32), (u32)elem->seq,
				 delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

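/*
 * pick the next ref head for the delayed ref processing code to run.
 * We scan forward from run_delayed_start, skip heads some other task is
 * already processing, and wrap around to the start of the tree once
 * before giving up.  The head returned is marked as processing.
 */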
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, NULL, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, NULL, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}

/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_head *head,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0)
			drop_delayed_ref(trans, delayed_refs, head, existing);
		else
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation.
	 * We only need the lock for this case because the head could already
	 * be getting processed; for refs we just added we know we're a-ok.
	 */
	spin_lock(&existing_ref->lock);
	existing->ref_mod += update->ref_mod;
	spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->ref_root = RB_ROOT;
	head_ref->processing = 0;

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(&existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(ref, full_ref, action);

	spin_lock(&head_ref->lock);
	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
	if (existing) {
		update_existing_ref(trans, delayed_refs, head_ref, existing,
				    ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
	} else {
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	spin_unlock(&head_ref->lock);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(ref, full_ref, action);

	spin_lock(&head_ref->lock);
	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
	if (existing) {
		update_existing_ref(trans, delayed_refs, head_ref, existing,
				    ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
	} else {
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	spin_unlock(&head_ref->lock);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
					bytenr, num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action,
			     for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
					bytenr, num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action, for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}

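/*
 * queue a head-only update (no backref change) so that the extent item
 * flags or key recorded in extent_op get applied when the head is run.
 */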
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, NULL, 0);
}

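/* destroy the slab caches; safe to call even after a partly failed init */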
void btrfs_delayed_ref_exit(void)
{
	if (btrfs_delayed_ref_head_cachep)
		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	if (btrfs_delayed_tree_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	if (btrfs_delayed_data_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	if (btrfs_delayed_extent_op_cachep)
		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

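/*
 * create the slab caches used by the delayed ref code; on any failure
 * everything already created is torn down and -ENOMEM is returned.
 */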
int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}