/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"

#define BTRFS_ROOT_TRANS_TAG 0

void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(transaction->delayed_refs.root.rb_node);
		WARN_ON(!list_empty(&transaction->delayed_refs.seq_head));
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int nofail)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		spin_unlock(&root->fs_info->trans_lock);
		return -EROFS;
	}

	if (root->fs_info->trans_no_join) {
		if (!nofail) {
			spin_unlock(&root->fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			/* must not return with trans_lock still held */
			spin_unlock(&root->fs_info->trans_lock);
			return cur_trans->aborted;
		}
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&root->fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&root->fs_info->trans_lock);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&root->fs_info->trans_lock);
	if (root->fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the trans_no_join checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		cur_trans = root->fs_info->running_transaction;
		goto loop;
	}

	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;
	cur_trans->delayed_refs.seq = 1;
	init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);
	INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    root->fs_info->btree_inode->i_mapping);
	root->fs_info->generation++;
	cur_trans->transid = root->fs_info->generation;
	root->fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&root->fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}


int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   !cur_trans->blocked);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

enum btrfs_trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
	TRANS_JOIN_NOLOCK,
};

static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    u64 num_items, int type)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes);
		if (ret)
			return ERR_PTR(ret);
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h)
		return ERR_PTR(-ENOMEM);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
		if (ret == -EBUSY)
			wait_current_trans(root);
	} while (ret == -EBUSY);

	if (ret < 0) {
		kmem_cache_free(btrfs_trans_handle_cachep, h);
		return ERR_PTR(ret);
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;
	h->aborted = 0;

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE);
}
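
/*
 * Illustrative usage sketch, not part of the original file.  A typical
 * caller pairs one of the start/join variants above with
 * btrfs_end_transaction(); num_items is the caller's estimate of how
 * many tree items it will touch, which start_transaction() converts
 * into a metadata reservation.  do_tree_update() is a hypothetical
 * helper standing in for the caller's real work:
 *
 *	struct btrfs_trans_handle *trans;
 *	int ret;
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = do_tree_update(trans, root);
 *	btrfs_end_transaction(trans, root);
 *	return ret;
 */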

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->commit_done);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret;

	ret = 0;
	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
			if (t->transid > transid)
				break;
		}
		spin_unlock(&root->fs_info->trans_lock);
		ret = -EINVAL;
		if (!cur_trans)
			goto out;  /* bad transid */
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);

	put_transaction(cur_trans);
	ret = 0;
out:
	return ret;
}
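
/*
 * Illustrative note, not part of the original file.  A transid of 0 asks
 * btrfs_wait_for_commit() to wait on whatever transaction is currently
 * committing, if any:
 *
 *	ret = btrfs_wait_for_commit(root, 0);
 *
 * A non-zero transid waits on that specific transaction; if it is not in
 * the transaction list and has not already committed, the call fails
 * with -EINVAL.
 */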

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;

	ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_block_rsv *rsv = trans->block_rsv;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	/*
	 * We need to do this in case we're deleting csums so the global block
	 * rsv gets used instead of the csum block rsv.
	 */
	trans->block_rsv = NULL;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates);
		if (err) /* a negative error code also evaluates true */
			return err;
	}

	trans->block_rsv = rsv;

	return should_end_transaction(trans, root);
}
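
/*
 * Illustrative usage sketch, not part of the original file.  Long-running
 * operations poll btrfs_should_end_transaction() so a pending commit is
 * not held off indefinitely; do_some_work() and more_work are
 * hypothetical stand-ins, and for brevity this treats any non-zero
 * return (errors included) as a signal to restart the handle:
 *
 *	while (more_work) {
 *		do_some_work(trans, root);
 *		if (btrfs_should_end_transaction(trans, root)) {
 *			btrfs_end_transaction(trans, root);
 *			trans = btrfs_start_transaction(root, 1);
 *			if (IS_ERR(trans))
 *				return PTR_ERR(trans);
 *		}
 *	}
 */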

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle,
				   int lock)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;
	int err = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	while (count < 2) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here so end up having
			 * to call end_transaction on ourselves again, so inc
			 * our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(root);

	if (trans->aborted ||
	    root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		err = -EIO;
	}

	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0, 1);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1, 1);
}

int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0, 0);
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1, 1);
}
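
/*
 * Summary of the variants above (added for clarity, not part of the
 * original file), in terms of __btrfs_end_transaction(trans, root,
 * throttle, lock):
 *
 *	btrfs_end_transaction()          throttle = 0, lock = 1
 *	btrfs_end_transaction_throttle() throttle = 1, lock = 1
 *	btrfs_end_transaction_nolock()   throttle = 0, lock = 0
 *	btrfs_end_transaction_dmeta()    throttle = 1, lock = 1
 *
 * throttle additionally runs delayed iputs and lets the handle commit a
 * blocked transaction itself; lock = 0 skips the blocked/commit checks.
 */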

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark)) {
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				   mark, GFP_NOFS);
		err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT)) {
		clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT,
				  GFP_NOFS);
		err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	ret = btrfs_write_dirty_block_groups(trans, root);
	if (ret)
		return ret;

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}

/*
 * update all the fs-tree roots (subvolume trees) that were changed in
 * this transaction, writing their root items into the tree of tree roots
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			root->force_cow = 0;
			smp_wmb();

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;
	unsigned long nr;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root, cacheonly);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;

	rsv = trans->block_rsv;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		ret = pending->error = -ENOMEM;
		goto fail;
	}

	ret = btrfs_find_free_objectid(tree_root, &objectid);
	if (ret) {
		pending->error = ret;
		goto fail;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
						  to_reserve);
		if (ret) {
			pending->error = ret;
			goto fail;
		}
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */
	ret = btrfs_insert_dir_item(trans, parent_root,
				dentry->d_name.name, dentry->d_name.len,
				parent_inode, &key,
				BTRFS_FT_DIR, index);
	if (ret == -EEXIST) {
		pending->error = -EEXIST;
		dput(parent);
		goto fail;
	} else if (ret) {
		goto abort_trans_dput;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	ret = btrfs_update_inode(trans, parent_root, parent_inode);
	if (ret)
		goto abort_trans_dput;

	/*
	 * pull in the delayed directory update and the delayed inode item;
	 * otherwise we corrupt the FS during the snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) { /* Transaction aborted */
		dput(parent);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		goto abort_trans_dput;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret)
		goto abort_trans_dput;

	/* see comments in should_cow_block() */
	root->force_cow = 1;
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret)
		goto abort_trans_dput;

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	dput(parent);
	if (ret)
		goto fail;

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		goto abort_trans;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret)
		goto abort_trans;
	ret = 0;
fail:
	kfree(new_root_item);
	trans->block_rsv = rsv;
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return ret;

abort_trans_dput:
	dput(parent);
abort_trans:
	btrfs_abort_transaction(trans, root, ret);
	goto fail;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;

	list_for_each_entry(pending, head, list)
		create_pending_snapshot(trans, fs_info, pending);
	return 0;
}

static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);
	schedule_delayed_work(&ac->work, 0);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	put_transaction(cur_trans);
	return 0;
}
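
/*
 * Illustrative usage sketch, not part of the original file.  Callers that
 * only need the commit to have started, rather than be fully on disk, can
 * fire it off in the background:
 *
 *	ret = btrfs_commit_transaction_async(trans, root, 0);
 *
 * Passing wait_for_unblock == 1 additionally waits until the commit has
 * unblocked, i.e. until new transactions may be started again.
 */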

static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	WARN_ON(trans->use_count > 1);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, root);

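	/*
	 * Drop both references: one for this trans handle and the one
	 * taken in join_transaction() so the transaction would live
	 * until commit (see the use_count comment there).
	 */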
	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}

/*
 * btrfs_transaction state sequence:
 *    in_commit = 0, blocked = 0  (initial)
 *    in_commit = 1, blocked = 1
 *    blocked = 0
 *    commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret = -EIO;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (cur_trans->aborted)
		goto cleanup_transaction;

	/* make a pass through all the delayed refs we have so far;
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret)
		goto cleanup_transaction;

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret)
		goto cleanup_transaction;

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		put_transaction(cur_trans);

		return ret;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
		should_grow = 1;

	do {
		int snap_pending = 0;

		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);

		if (flush_on_commit || snap_pending) {
			btrfs_start_delalloc_inodes(root, 1);
			btrfs_wait_ordered_extents(root, 0, 1);
		}

		ret = btrfs_run_delayed_items(trans, root);
		if (ret)
			goto cleanup_transaction;

		/*
		 * rename doesn't use btrfs_join_transaction, so once we
		 * set the transaction to blocked above we aren't going
		 * to get any new ordered operations.  We can safely run
		 * them here and know for sure that nothing new will be
		 * added to the list
		 */
		btrfs_run_ordered_operations(root, 1);

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* commit_cowonly_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	/* commit_fs_roots gets rid of all the tree log roots, so it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	}

	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Error while writing out transaction.");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

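	/* drop the handle's ref and the ref taken in join_transaction() */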
	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;

cleanup_transaction:
	btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root);

	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		int ret;

		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			ret = btrfs_drop_snapshot(root, NULL, 0, 0);
		else
			ret = btrfs_drop_snapshot(root, NULL, 1, 0);
		BUG_ON(ret < 0);
	}
	return 0;
}