fs/btrfs/transaction.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/slab.h>
21 #include <linux/sched.h>
22 #include <linux/writeback.h>
23 #include <linux/pagemap.h>
24 #include <linux/blkdev.h>
25 #include "ctree.h"
26 #include "disk-io.h"
27 #include "transaction.h"
28 #include "locking.h"
29 #include "tree-log.h"
30 #include "inode-map.h"
31
32 #define BTRFS_ROOT_TRANS_TAG 0
33
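/*
 * drop one reference on a transaction.  The struct is zeroed and freed
 * once the last reference is gone.
 */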
34 static noinline void put_transaction(struct btrfs_transaction *transaction)
35 {
36         WARN_ON(atomic_read(&transaction->use_count) == 0);
37         if (atomic_dec_and_test(&transaction->use_count)) {
38                 BUG_ON(!list_empty(&transaction->list));
39                 memset(transaction, 0, sizeof(*transaction));
40                 kmem_cache_free(btrfs_transaction_cachep, transaction);
41         }
42 }
43
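/*
 * make the current root node the new commit root, dropping the old
 * commit root's extent buffer reference.
 */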
44 static noinline void switch_commit_root(struct btrfs_root *root)
45 {
46         free_extent_buffer(root->commit_root);
47         root->commit_root = btrfs_root_node(root);
48 }
49
50 /*
51  * either allocate a new transaction or hop into the existing one
52  */
53 static noinline int join_transaction(struct btrfs_root *root, int nofail)
54 {
55         struct btrfs_transaction *cur_trans;
56
57         spin_lock(&root->fs_info->trans_lock);
58 loop:
59         if (root->fs_info->trans_no_join) {
60                 if (!nofail) {
61                         spin_unlock(&root->fs_info->trans_lock);
62                         return -EBUSY;
63                 }
64         }
65
66         cur_trans = root->fs_info->running_transaction;
67         if (cur_trans) {
68                 atomic_inc(&cur_trans->use_count);
69                 atomic_inc(&cur_trans->num_writers);
70                 cur_trans->num_joined++;
71                 spin_unlock(&root->fs_info->trans_lock);
72                 return 0;
73         }
74         spin_unlock(&root->fs_info->trans_lock);
75
76         cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
77         if (!cur_trans)
78                 return -ENOMEM;
79
80         spin_lock(&root->fs_info->trans_lock);
81         if (root->fs_info->running_transaction) {
82                 /*
83                  * someone started a transaction after we unlocked.  Make sure
84                  * to redo the trans_no_join checks above
85                  */
86                 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
87                 cur_trans = root->fs_info->running_transaction;
88                 goto loop;
89         }
90
91         atomic_set(&cur_trans->num_writers, 1);
92         cur_trans->num_joined = 0;
93         init_waitqueue_head(&cur_trans->writer_wait);
94         init_waitqueue_head(&cur_trans->commit_wait);
95         cur_trans->in_commit = 0;
96         cur_trans->blocked = 0;
97         /*
98          * One for this trans handle, one so it will live on until we
99          * commit the transaction.
100          */
101         atomic_set(&cur_trans->use_count, 2);
102         cur_trans->commit_done = 0;
103         cur_trans->start_time = get_seconds();
104
105         cur_trans->delayed_refs.root = RB_ROOT;
106         cur_trans->delayed_refs.num_entries = 0;
107         cur_trans->delayed_refs.num_heads_ready = 0;
108         cur_trans->delayed_refs.num_heads = 0;
109         cur_trans->delayed_refs.flushing = 0;
110         cur_trans->delayed_refs.run_delayed_start = 0;
111         spin_lock_init(&cur_trans->commit_lock);
112         spin_lock_init(&cur_trans->delayed_refs.lock);
113
114         INIT_LIST_HEAD(&cur_trans->pending_snapshots);
115         list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
116         extent_io_tree_init(&cur_trans->dirty_pages,
117                              root->fs_info->btree_inode->i_mapping);
118         root->fs_info->generation++;
119         cur_trans->transid = root->fs_info->generation;
120         root->fs_info->running_transaction = cur_trans;
121         spin_unlock(&root->fs_info->trans_lock);
122
123         return 0;
124 }
125
126 /*
127  * this does all the record keeping required to make sure that a reference
128  * counted root is properly recorded in a given transaction.  This is required
129  * to make sure the old root from before we joined the transaction is deleted
130  * when the transaction commits
131  */
132 static int record_root_in_trans(struct btrfs_trans_handle *trans,
133                                struct btrfs_root *root)
134 {
135         if (root->ref_cows && root->last_trans < trans->transid) {
136                 WARN_ON(root == root->fs_info->extent_root);
137                 WARN_ON(root->commit_root != root->node);
138
139                 /*
140                  * see below for in_trans_setup usage rules
141                  * we have the reloc mutex held now, so there
142                  * is only one writer in this function
143                  */
144                 root->in_trans_setup = 1;
145
146                 /* make sure readers find in_trans_setup before
147                  * they find our root->last_trans update
148                  */
149                 smp_wmb();
150
151                 spin_lock(&root->fs_info->fs_roots_radix_lock);
152                 if (root->last_trans == trans->transid) {
153                         spin_unlock(&root->fs_info->fs_roots_radix_lock);
154                         return 0;
155                 }
156                 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
157                            (unsigned long)root->root_key.objectid,
158                            BTRFS_ROOT_TRANS_TAG);
159                 spin_unlock(&root->fs_info->fs_roots_radix_lock);
160                 root->last_trans = trans->transid;
161
162                 /* this is pretty tricky.  We don't want to
163                  * take the relocation lock in btrfs_record_root_in_trans
164                  * unless we're really doing the first setup for this root in
165                  * this transaction.
166                  *
167                  * Normally we'd use root->last_trans as a flag to decide
168                  * if we want to take the expensive mutex.
169                  *
170                  * But, we have to set root->last_trans before we
171                  * init the relocation root, otherwise, we trip over warnings
172                  * in ctree.c.  The solution used here is to flag ourselves
173                  * with root->in_trans_setup.  When this is 1, we're still
174                  * fixing up the reloc trees and everyone must wait.
175                  *
176                  * When this is zero, they can trust root->last_trans and fly
177                  * through btrfs_record_root_in_trans without having to take the
178                  * lock.  smp_wmb() makes sure that all the writes above are
179                  * done before we pop in the zero below
180                  */
181                 btrfs_init_reloc_root(trans, root);
182                 smp_wmb();
183                 root->in_trans_setup = 0;
184         }
185         return 0;
186 }
187
188
189 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
190                                struct btrfs_root *root)
191 {
192         if (!root->ref_cows)
193                 return 0;
194
195         /*
196          * see record_root_in_trans for comments about in_trans_setup usage
197          * and barriers
198          */
199         smp_rmb();
200         if (root->last_trans == trans->transid &&
201             !root->in_trans_setup)
202                 return 0;
203
204         mutex_lock(&root->fs_info->reloc_mutex);
205         record_root_in_trans(trans, root);
206         mutex_unlock(&root->fs_info->reloc_mutex);
207
208         return 0;
209 }
210
211 /* wait for commit against the current transaction to become unblocked
212  * when this is done, it is safe to start a new transaction, but the current
213  * transaction might not be fully on disk.
214  */
215 static void wait_current_trans(struct btrfs_root *root)
216 {
217         struct btrfs_transaction *cur_trans;
218
219         spin_lock(&root->fs_info->trans_lock);
220         cur_trans = root->fs_info->running_transaction;
221         if (cur_trans && cur_trans->blocked) {
222                 atomic_inc(&cur_trans->use_count);
223                 spin_unlock(&root->fs_info->trans_lock);
224
225                 wait_event(root->fs_info->transaction_wait,
226                            !cur_trans->blocked);
227                 put_transaction(cur_trans);
228         } else {
229                 spin_unlock(&root->fs_info->trans_lock);
230         }
231 }
232
233 enum btrfs_trans_type {
234         TRANS_START,
235         TRANS_JOIN,
236         TRANS_USERSPACE,
237         TRANS_JOIN_NOLOCK,
238 };
239
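/*
 * decide whether a new trans handle should wait for the running
 * transaction to unblock before joining (returns 1 to wait).
 */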
240 static int may_wait_transaction(struct btrfs_root *root, int type)
241 {
242         if (root->fs_info->log_root_recovering)
243                 return 0;
244
245         if (type == TRANS_USERSPACE)
246                 return 1;
247
248         if (type == TRANS_START &&
249             !atomic_read(&root->fs_info->open_ioctl_trans))
250                 return 1;
251
252         return 0;
253 }
254
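/*
 * common helper behind the btrfs_*_transaction() entry points below:
 * reserve metadata space for num_items items (when asked to), join or
 * create the running transaction and return a trans handle.
 */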
255 static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
256                                                     u64 num_items, int type)
257 {
258         struct btrfs_trans_handle *h;
259         struct btrfs_transaction *cur_trans;
260         u64 num_bytes = 0;
261         int ret;
262
263         if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
264                 return ERR_PTR(-EROFS);
265
266         if (current->journal_info) {
267                 WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
268                 h = current->journal_info;
269                 h->use_count++;
270                 h->orig_rsv = h->block_rsv;
271                 h->block_rsv = NULL;
272                 goto got_it;
273         }
274
275         /*
276          * Do the reservation before we join the transaction so we can do all
277          * the appropriate flushing if need be.
278          */
279         if (num_items > 0 && root != root->fs_info->chunk_root) {
280                 num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
281                 ret = btrfs_block_rsv_add(root,
282                                           &root->fs_info->trans_block_rsv,
283                                           num_bytes);
284                 if (ret)
285                         return ERR_PTR(ret);
286         }
287 again:
288         h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
289         if (!h)
290                 return ERR_PTR(-ENOMEM);
291
292         if (may_wait_transaction(root, type))
293                 wait_current_trans(root);
294
295         do {
296                 ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
297                 if (ret == -EBUSY)
298                         wait_current_trans(root);
299         } while (ret == -EBUSY);
300
301         if (ret < 0) {
302                 kmem_cache_free(btrfs_trans_handle_cachep, h);
303                 return ERR_PTR(ret);
304         }
305
306         cur_trans = root->fs_info->running_transaction;
307
308         h->transid = cur_trans->transid;
309         h->transaction = cur_trans;
310         h->blocks_used = 0;
311         h->bytes_reserved = 0;
312         h->delayed_ref_updates = 0;
313         h->use_count = 1;
314         h->block_rsv = NULL;
315         h->orig_rsv = NULL;
316
317         smp_mb();
318         if (cur_trans->blocked && may_wait_transaction(root, type)) {
319                 btrfs_commit_transaction(h, root);
320                 goto again;
321         }
322
323         if (num_bytes) {
324                 h->block_rsv = &root->fs_info->trans_block_rsv;
325                 h->bytes_reserved = num_bytes;
326         }
327
328 got_it:
329         btrfs_record_root_in_trans(h, root);
330
331         if (!current->journal_info && type != TRANS_USERSPACE)
332                 current->journal_info = h;
333         return h;
334 }
335
336 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
337                                                    int num_items)
338 {
339         return start_transaction(root, num_items, TRANS_START);
340 }
341 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
342 {
343         return start_transaction(root, 0, TRANS_JOIN);
344 }
345
346 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
347 {
348         return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
349 }
350
351 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
352 {
353         return start_transaction(root, 0, TRANS_USERSPACE);
354 }
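
/*
 * A minimal usage sketch for the handles returned above (illustrative
 * only, error handling trimmed):
 *
 *        trans = btrfs_start_transaction(root, 1);
 *        if (IS_ERR(trans))
 *                return PTR_ERR(trans);
 *        ... modify up to one item's worth of metadata ...
 *        return btrfs_end_transaction(trans, root);
 */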
355
356 /* wait for a transaction commit to be fully complete */
357 static noinline void wait_for_commit(struct btrfs_root *root,
358                                     struct btrfs_transaction *commit)
359 {
360         wait_event(commit->commit_wait, commit->commit_done);
361 }
362
363 int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
364 {
365         struct btrfs_transaction *cur_trans = NULL, *t;
366         int ret;
367
368         ret = 0;
369         if (transid) {
370                 if (transid <= root->fs_info->last_trans_committed)
371                         goto out;
372
373                 /* find specified transaction */
374                 spin_lock(&root->fs_info->trans_lock);
375                 list_for_each_entry(t, &root->fs_info->trans_list, list) {
376                         if (t->transid == transid) {
377                                 cur_trans = t;
378                                 atomic_inc(&cur_trans->use_count);
379                                 break;
380                         }
381                         if (t->transid > transid)
382                                 break;
383                 }
384                 spin_unlock(&root->fs_info->trans_lock);
385                 ret = -EINVAL;
386                 if (!cur_trans)
387                         goto out;  /* bad transid */
388         } else {
389                 /* find newest transaction that is committing | committed */
390                 spin_lock(&root->fs_info->trans_lock);
391                 list_for_each_entry_reverse(t, &root->fs_info->trans_list,
392                                             list) {
393                         if (t->in_commit) {
394                                 if (t->commit_done)
395                                         break;
396                                 cur_trans = t;
397                                 atomic_inc(&cur_trans->use_count);
398                                 break;
399                         }
400                 }
401                 spin_unlock(&root->fs_info->trans_lock);
402                 if (!cur_trans)
403                         goto out;  /* nothing committing|committed */
404         }
405
406         wait_for_commit(root, cur_trans);
407
408         put_transaction(cur_trans);
409         ret = 0;
410 out:
411         return ret;
412 }
413
414 void btrfs_throttle(struct btrfs_root *root)
415 {
416         if (!atomic_read(&root->fs_info->open_ioctl_trans))
417                 wait_current_trans(root);
418 }
419
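/*
 * ask whether the global block reservation is getting low enough that
 * the current transaction should be wrapped up (returns 1 if so).
 */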
420 static int should_end_transaction(struct btrfs_trans_handle *trans,
421                                   struct btrfs_root *root)
422 {
423         int ret;
424
425         ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
426         return ret ? 1 : 0;
427 }
428
429 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
430                                  struct btrfs_root *root)
431 {
432         struct btrfs_transaction *cur_trans = trans->transaction;
433         struct btrfs_block_rsv *rsv = trans->block_rsv;
434         int updates;
435
436         smp_mb();
437         if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
438                 return 1;
439
440         /*
441          * We need to do this in case we're deleting csums so the global block
442          * rsv gets used instead of the csum block rsv.
443          */
444         trans->block_rsv = NULL;
445
446         updates = trans->delayed_ref_updates;
447         trans->delayed_ref_updates = 0;
448         if (updates)
449                 btrfs_run_delayed_refs(trans, root, updates);
450
451         trans->block_rsv = rsv;
452
453         return should_end_transaction(trans, root);
454 }
455
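/*
 * common exit path for the btrfs_end_transaction variants: run a couple
 * of batches of delayed refs, mark the transaction blocked if it is time
 * to commit, then drop our writer count and free the handle.
 */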
456 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
457                           struct btrfs_root *root, int throttle, int lock)
458 {
459         struct btrfs_transaction *cur_trans = trans->transaction;
460         struct btrfs_fs_info *info = root->fs_info;
461         int count = 0;
462
463         if (--trans->use_count) {
464                 trans->block_rsv = trans->orig_rsv;
465                 return 0;
466         }
467
468         btrfs_trans_release_metadata(trans, root);
469         trans->block_rsv = NULL;
470         while (count < 2) {
471                 unsigned long cur = trans->delayed_ref_updates;
472                 trans->delayed_ref_updates = 0;
473                 if (cur &&
474                     trans->transaction->delayed_refs.num_heads_ready > 64) {
475                         trans->delayed_ref_updates = 0;
476                         btrfs_run_delayed_refs(trans, root, cur);
477                 } else {
478                         break;
479                 }
480                 count++;
481         }
482
483         if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
484             should_end_transaction(trans, root)) {
485                 trans->transaction->blocked = 1;
486                 smp_wmb();
487         }
488
489         if (lock && cur_trans->blocked && !cur_trans->in_commit) {
490                 if (throttle) {
491                          * We may race with somebody else here and end up having
492                          * We may race with somebody else here so end up having
493                          * to call end_transaction on ourselves again, so inc
494                          * our use_count.
495                          */
496                         trans->use_count++;
497                         return btrfs_commit_transaction(trans, root);
498                 } else {
499                         wake_up_process(info->transaction_kthread);
500                 }
501         }
502
503         WARN_ON(cur_trans != info->running_transaction);
504         WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
505         atomic_dec(&cur_trans->num_writers);
506
507         smp_mb();
508         if (waitqueue_active(&cur_trans->writer_wait))
509                 wake_up(&cur_trans->writer_wait);
510         put_transaction(cur_trans);
511
512         if (current->journal_info == trans)
513                 current->journal_info = NULL;
514         memset(trans, 0, sizeof(*trans));
515         kmem_cache_free(btrfs_trans_handle_cachep, trans);
516
517         if (throttle)
518                 btrfs_run_delayed_iputs(root);
519
520         return 0;
521 }
522
523 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
524                           struct btrfs_root *root)
525 {
526         int ret;
527
528         ret = __btrfs_end_transaction(trans, root, 0, 1);
529         if (ret)
530                 return ret;
531         return 0;
532 }
533
534 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
535                                    struct btrfs_root *root)
536 {
537         int ret;
538
539         ret = __btrfs_end_transaction(trans, root, 1, 1);
540         if (ret)
541                 return ret;
542         return 0;
543 }
544
545 int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
546                                  struct btrfs_root *root)
547 {
548         int ret;
549
550         ret = __btrfs_end_transaction(trans, root, 0, 0);
551         if (ret)
552                 return ret;
553         return 0;
554 }
555
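/* same as btrfs_end_transaction_throttle (throttle == 1, lock == 1) */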
556 int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
557                                 struct btrfs_root *root)
558 {
559         return __btrfs_end_transaction(trans, root, 1, 1);
560 }
561
562 /*
563  * when btree blocks are allocated, they have some corresponding bits set for
564  * them in one of two extent_io trees.  This is used to make sure all of
565  * those extents are sent to disk but does not wait on them
566  */
567 int btrfs_write_marked_extents(struct btrfs_root *root,
568                                struct extent_io_tree *dirty_pages, int mark)
569 {
570         int err = 0;
571         int werr = 0;
572         struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
573         u64 start = 0;
574         u64 end;
575
576         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
577                                       mark)) {
578                 convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, mark,
579                                    GFP_NOFS);
580                 err = filemap_fdatawrite_range(mapping, start, end);
581                 if (err)
582                         werr = err;
583                 cond_resched();
584                 start = end + 1;
585         }
586         if (err)
587                 werr = err;
588         return werr;
589 }
590
591 /*
592  * when btree blocks are allocated, they have some corresponding bits set for
593  * them in one of two extent_io trees.  This is used to make sure all of
594  * those extents are on disk for transaction or log commit.  We wait
595  * on all the pages and clear them from the dirty pages state tree
596  */
597 int btrfs_wait_marked_extents(struct btrfs_root *root,
598                               struct extent_io_tree *dirty_pages, int mark)
599 {
600         int err = 0;
601         int werr = 0;
602         struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
603         u64 start = 0;
604         u64 end;
605
606         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
607                                       EXTENT_NEED_WAIT)) {
608                 clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT, GFP_NOFS);
609                 err = filemap_fdatawait_range(mapping, start, end);
610                 if (err)
611                         werr = err;
612                 cond_resched();
613                 start = end + 1;
614         }
615         if (err)
616                 werr = err;
617         return werr;
618 }
619
620 /*
621  * when btree blocks are allocated, they have some corresponding bits set for
622  * them in one of two extent_io trees.  This is used to make sure all of
623  * those extents are on disk for transaction or log commit
624  */
625 int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
626                                 struct extent_io_tree *dirty_pages, int mark)
627 {
628         int ret;
629         int ret2;
630
631         ret = btrfs_write_marked_extents(root, dirty_pages, mark);
632         ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
633
634         if (ret)
635                 return ret;
636         if (ret2)
637                 return ret2;
638         return 0;
639 }
640
641 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
642                                      struct btrfs_root *root)
643 {
644         if (!trans || !trans->transaction) {
645                 struct inode *btree_inode;
646                 btree_inode = root->fs_info->btree_inode;
647                 return filemap_write_and_wait(btree_inode->i_mapping);
648         }
649         return btrfs_write_and_wait_marked_extents(root,
650                                            &trans->transaction->dirty_pages,
651                                            EXTENT_DIRTY);
652 }
653
654 /*
655  * this is used to update the root pointer in the tree of tree roots.
656  *
657  * But, in the case of the extent allocation tree, updating the root
658  * pointer may allocate blocks which may change the root of the extent
659  * allocation tree.
660  *
661  * So, this loops and repeats and makes sure the cowonly root didn't
662  * change while the root pointer was being updated in the metadata.
663  */
664 static int update_cowonly_root(struct btrfs_trans_handle *trans,
665                                struct btrfs_root *root)
666 {
667         int ret;
668         u64 old_root_bytenr;
669         u64 old_root_used;
670         struct btrfs_root *tree_root = root->fs_info->tree_root;
671
672         old_root_used = btrfs_root_used(&root->root_item);
673         btrfs_write_dirty_block_groups(trans, root);
674
675         while (1) {
676                 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
677                 if (old_root_bytenr == root->node->start &&
678                     old_root_used == btrfs_root_used(&root->root_item))
679                         break;
680
681                 btrfs_set_root_node(&root->root_item, root->node);
682                 ret = btrfs_update_root(trans, tree_root,
683                                         &root->root_key,
684                                         &root->root_item);
685                 BUG_ON(ret);
686
687                 old_root_used = btrfs_root_used(&root->root_item);
688                 ret = btrfs_write_dirty_block_groups(trans, root);
689                 BUG_ON(ret);
690         }
691
692         if (root != root->fs_info->extent_root)
693                 switch_commit_root(root);
694
695         return 0;
696 }
697
698 /*
699  * update all the cowonly tree roots on disk
700  */
701 static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
702                                          struct btrfs_root *root)
703 {
704         struct btrfs_fs_info *fs_info = root->fs_info;
705         struct list_head *next;
706         struct extent_buffer *eb;
707         int ret;
708
709         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
710         BUG_ON(ret);
711
712         eb = btrfs_lock_root_node(fs_info->tree_root);
713         btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
714         btrfs_tree_unlock(eb);
715         free_extent_buffer(eb);
716
717         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
718         BUG_ON(ret);
719
720         while (!list_empty(&fs_info->dirty_cowonly_roots)) {
721                 next = fs_info->dirty_cowonly_roots.next;
722                 list_del_init(next);
723                 root = list_entry(next, struct btrfs_root, dirty_list);
724
725                 update_cowonly_root(trans, root);
726         }
727
728         down_write(&fs_info->extent_commit_sem);
729         switch_commit_root(fs_info->extent_root);
730         up_write(&fs_info->extent_commit_sem);
731
732         return 0;
733 }
734
735 /*
736  * dead roots are old snapshots that need to be deleted.  This adds the
737  * given root onto the list of dead roots that btrfs_clean_old_snapshots
738  * will eventually delete
739  */
740 int btrfs_add_dead_root(struct btrfs_root *root)
741 {
742         spin_lock(&root->fs_info->trans_lock);
743         list_add(&root->root_list, &root->fs_info->dead_roots);
744         spin_unlock(&root->fs_info->trans_lock);
745         return 0;
746 }
747
748 /*
749  * write out all the fs tree roots that were modified in this transaction
750  */
751 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
752                                     struct btrfs_root *root)
753 {
754         struct btrfs_root *gang[8];
755         struct btrfs_fs_info *fs_info = root->fs_info;
756         int i;
757         int ret;
758         int err = 0;
759
760         spin_lock(&fs_info->fs_roots_radix_lock);
761         while (1) {
762                 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
763                                                  (void **)gang, 0,
764                                                  ARRAY_SIZE(gang),
765                                                  BTRFS_ROOT_TRANS_TAG);
766                 if (ret == 0)
767                         break;
768                 for (i = 0; i < ret; i++) {
769                         root = gang[i];
770                         radix_tree_tag_clear(&fs_info->fs_roots_radix,
771                                         (unsigned long)root->root_key.objectid,
772                                         BTRFS_ROOT_TRANS_TAG);
773                         spin_unlock(&fs_info->fs_roots_radix_lock);
774
775                         btrfs_free_log(trans, root);
776                         btrfs_update_reloc_root(trans, root);
777                         btrfs_orphan_commit_root(trans, root);
778
779                         btrfs_save_ino_cache(root, trans);
780
781                         /* see comments in should_cow_block() */
782                         root->force_cow = 0;
783                         smp_wmb();
784
785                         if (root->commit_root != root->node) {
786                                 mutex_lock(&root->fs_commit_mutex);
787                                 switch_commit_root(root);
788                                 btrfs_unpin_free_ino(root);
789                                 mutex_unlock(&root->fs_commit_mutex);
790
791                                 btrfs_set_root_node(&root->root_item,
792                                                     root->node);
793                         }
794
795                         err = btrfs_update_root(trans, fs_info->tree_root,
796                                                 &root->root_key,
797                                                 &root->root_item);
798                         spin_lock(&fs_info->fs_roots_radix_lock);
799                         if (err)
800                                 break;
801                 }
802         }
803         spin_unlock(&fs_info->fs_roots_radix_lock);
804         return err;
805 }
806
807 /*
808  * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
809  * otherwise every leaf in the btree is read and defragged.
810  */
811 int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
812 {
813         struct btrfs_fs_info *info = root->fs_info;
814         struct btrfs_trans_handle *trans;
815         int ret;
816         unsigned long nr;
817
818         if (xchg(&root->defrag_running, 1))
819                 return 0;
820
821         while (1) {
822                 trans = btrfs_start_transaction(root, 0);
823                 if (IS_ERR(trans))
824                         return PTR_ERR(trans);
825
826                 ret = btrfs_defrag_leaves(trans, root, cacheonly);
827
828                 nr = trans->blocks_used;
829                 btrfs_end_transaction(trans, root);
830                 btrfs_btree_balance_dirty(info->tree_root, nr);
831                 cond_resched();
832
833                 if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
834                         break;
835         }
836         root->defrag_running = 0;
837         return ret;
838 }
839
840 /*
841  * new snapshots need to be created at a very specific time in the
842  * transaction commit.  This does the actual creation
843  */
844 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
845                                    struct btrfs_fs_info *fs_info,
846                                    struct btrfs_pending_snapshot *pending)
847 {
848         struct btrfs_key key;
849         struct btrfs_root_item *new_root_item;
850         struct btrfs_root *tree_root = fs_info->tree_root;
851         struct btrfs_root *root = pending->root;
852         struct btrfs_root *parent_root;
853         struct btrfs_block_rsv *rsv;
854         struct inode *parent_inode;
855         struct dentry *parent;
856         struct dentry *dentry;
857         struct extent_buffer *tmp;
858         struct extent_buffer *old;
859         int ret;
860         u64 to_reserve = 0;
861         u64 index = 0;
862         u64 objectid;
863         u64 root_flags;
864
865         rsv = trans->block_rsv;
866
867         new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
868         if (!new_root_item) {
869                 pending->error = -ENOMEM;
870                 goto fail;
871         }
872
873         ret = btrfs_find_free_objectid(tree_root, &objectid);
874         if (ret) {
875                 pending->error = ret;
876                 goto fail;
877         }
878
879         btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
880
881         if (to_reserve > 0) {
882                 ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
883                                                   to_reserve);
884                 if (ret) {
885                         pending->error = ret;
886                         goto fail;
887                 }
888         }
889
890         key.objectid = objectid;
891         key.offset = (u64)-1;
892         key.type = BTRFS_ROOT_ITEM_KEY;
893
894         trans->block_rsv = &pending->block_rsv;
895
896         dentry = pending->dentry;
897         parent = dget_parent(dentry);
898         parent_inode = parent->d_inode;
899         parent_root = BTRFS_I(parent_inode)->root;
900         record_root_in_trans(trans, parent_root);
901
902         /*
903          * insert the directory item
904          */
905         ret = btrfs_set_inode_index(parent_inode, &index);
906         BUG_ON(ret);
907         ret = btrfs_insert_dir_item(trans, parent_root,
908                                 dentry->d_name.name, dentry->d_name.len,
909                                 parent_inode, &key,
910                                 BTRFS_FT_DIR, index);
911         BUG_ON(ret);
912
913         btrfs_i_size_write(parent_inode, parent_inode->i_size +
914                                          dentry->d_name.len * 2);
915         ret = btrfs_update_inode(trans, parent_root, parent_inode);
916         BUG_ON(ret);
917
918         /*
919          * pull in the delayed directory update
920          * and the delayed inode item
921          * otherwise we corrupt the FS during
922          * snapshot
923          */
924         ret = btrfs_run_delayed_items(trans, root);
925         BUG_ON(ret);
926
927         record_root_in_trans(trans, root);
928         btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
929         memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
930         btrfs_check_and_init_root_item(new_root_item);
931
932         root_flags = btrfs_root_flags(new_root_item);
933         if (pending->readonly)
934                 root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
935         else
936                 root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
937         btrfs_set_root_flags(new_root_item, root_flags);
938
939         old = btrfs_lock_root_node(root);
940         btrfs_cow_block(trans, root, old, NULL, 0, &old);
941         btrfs_set_lock_blocking(old);
942
943         btrfs_copy_root(trans, root, old, &tmp, objectid);
944         btrfs_tree_unlock(old);
945         free_extent_buffer(old);
946
947         /* see comments in should_cow_block() */
948         root->force_cow = 1;
949         smp_wmb();
950
951         btrfs_set_root_node(new_root_item, tmp);
952         /* record when the snapshot was created in key.offset */
953         key.offset = trans->transid;
954         ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
955         btrfs_tree_unlock(tmp);
956         free_extent_buffer(tmp);
957         BUG_ON(ret);
958
959         /*
960          * insert root back/forward references
961          */
962         ret = btrfs_add_root_ref(trans, tree_root, objectid,
963                                  parent_root->root_key.objectid,
964                                  btrfs_ino(parent_inode), index,
965                                  dentry->d_name.name, dentry->d_name.len);
966         BUG_ON(ret);
967         dput(parent);
968
969         key.offset = (u64)-1;
970         pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
971         BUG_ON(IS_ERR(pending->snap));
972
973         btrfs_reloc_post_snapshot(trans, pending);
974 fail:
975         kfree(new_root_item);
976         trans->block_rsv = rsv;
977         btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
978         return 0;
979 }
980
981 /*
982  * create all the snapshots we've scheduled for creation
983  */
984 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
985                                              struct btrfs_fs_info *fs_info)
986 {
987         struct btrfs_pending_snapshot *pending;
988         struct list_head *head = &trans->transaction->pending_snapshots;
989         int ret;
990
991         list_for_each_entry(pending, head, list) {
992                 ret = create_pending_snapshot(trans, fs_info, pending);
993                 BUG_ON(ret);
994         }
995         return 0;
996 }
997
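/*
 * copy the bytenr, generation and level of the freshly committed chunk
 * root and tree root into the in-memory super block copy.
 */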
998 static void update_super_roots(struct btrfs_root *root)
999 {
1000         struct btrfs_root_item *root_item;
1001         struct btrfs_super_block *super;
1002
1003         super = root->fs_info->super_copy;
1004
1005         root_item = &root->fs_info->chunk_root->root_item;
1006         super->chunk_root = root_item->bytenr;
1007         super->chunk_root_generation = root_item->generation;
1008         super->chunk_root_level = root_item->level;
1009
1010         root_item = &root->fs_info->tree_root->root_item;
1011         super->root = root_item->bytenr;
1012         super->generation = root_item->generation;
1013         super->root_level = root_item->level;
1014         if (btrfs_test_opt(root, SPACE_CACHE))
1015                 super->cache_generation = root_item->generation;
1016 }
1017
1018 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1019 {
1020         int ret = 0;
1021         spin_lock(&info->trans_lock);
1022         if (info->running_transaction)
1023                 ret = info->running_transaction->in_commit;
1024         spin_unlock(&info->trans_lock);
1025         return ret;
1026 }
1027
1028 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1029 {
1030         int ret = 0;
1031         spin_lock(&info->trans_lock);
1032         if (info->running_transaction)
1033                 ret = info->running_transaction->blocked;
1034         spin_unlock(&info->trans_lock);
1035         return ret;
1036 }
1037
1038 /*
1039  * wait for the current transaction commit to start and block subsequent
1040  * transaction joins
1041  */
1042 static void wait_current_trans_commit_start(struct btrfs_root *root,
1043                                             struct btrfs_transaction *trans)
1044 {
1045         wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
1046 }
1047
1048 /*
1049  * wait for the current transaction to start and then become unblocked.
1050  * caller holds ref.
1051  */
1052 static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
1053                                          struct btrfs_transaction *trans)
1054 {
1055         wait_event(root->fs_info->transaction_wait,
1056                    trans->commit_done || (trans->in_commit && !trans->blocked));
1057 }
1058
1059 /*
1060  * commit transactions asynchronously. once btrfs_commit_transaction_async
1061  * returns, any subsequent transaction will not be allowed to join.
1062  */
1063 struct btrfs_async_commit {
1064         struct btrfs_trans_handle *newtrans;
1065         struct btrfs_root *root;
1066         struct delayed_work work;
1067 };
1068
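/*
 * work function that runs the commit scheduled by
 * btrfs_commit_transaction_async below
 */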
1069 static void do_async_commit(struct work_struct *work)
1070 {
1071         struct btrfs_async_commit *ac =
1072                 container_of(work, struct btrfs_async_commit, work.work);
1073
1074         btrfs_commit_transaction(ac->newtrans, ac->root);
1075         kfree(ac);
1076 }
1077
1078 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1079                                    struct btrfs_root *root,
1080                                    int wait_for_unblock)
1081 {
1082         struct btrfs_async_commit *ac;
1083         struct btrfs_transaction *cur_trans;
1084
1085         ac = kmalloc(sizeof(*ac), GFP_NOFS);
1086         if (!ac)
1087                 return -ENOMEM;
1088
1089         INIT_DELAYED_WORK(&ac->work, do_async_commit);
1090         ac->root = root;
1091         ac->newtrans = btrfs_join_transaction(root);
1092         if (IS_ERR(ac->newtrans)) {
1093                 int err = PTR_ERR(ac->newtrans);
1094                 kfree(ac);
1095                 return err;
1096         }
1097
1098         /* take transaction reference */
1099         cur_trans = trans->transaction;
1100         atomic_inc(&cur_trans->use_count);
1101
1102         btrfs_end_transaction(trans, root);
1103         schedule_delayed_work(&ac->work, 0);
1104
1105         /* wait for transaction to start and unblock */
1106         if (wait_for_unblock)
1107                 wait_current_trans_commit_start_and_unblock(root, cur_trans);
1108         else
1109                 wait_current_trans_commit_start(root, cur_trans);
1110
1111         if (current->journal_info == trans)
1112                 current->journal_info = NULL;
1113
1114         put_transaction(cur_trans);
1115         return 0;
1116 }
1117
1118 /*
1119  * btrfs_transaction state sequence:
1120  *    in_commit = 0, blocked = 0  (initial)
1121  *    in_commit = 1, blocked = 1
1122  *    blocked = 0
1123  *    commit_done = 1
1124  */
1125 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1126                              struct btrfs_root *root)
1127 {
1128         unsigned long joined = 0;
1129         struct btrfs_transaction *cur_trans;
1130         struct btrfs_transaction *prev_trans = NULL;
1131         DEFINE_WAIT(wait);
1132         int ret;
1133         int should_grow = 0;
1134         unsigned long now = get_seconds();
1135         int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
1136
1137         btrfs_run_ordered_operations(root, 0);
1138
1139         btrfs_trans_release_metadata(trans, root);
1140         trans->block_rsv = NULL;
1141
1142         /* make a pass through all the delayed refs we have so far
1143          * any running procs may add more while we are here
1144          */
1145         ret = btrfs_run_delayed_refs(trans, root, 0);
1146         BUG_ON(ret);
1147
1148         cur_trans = trans->transaction;
1149         /*
1150          * set the flushing flag so procs in this transaction have to
1151          * start sending their work down.
1152          */
1153         cur_trans->delayed_refs.flushing = 1;
1154
1155         ret = btrfs_run_delayed_refs(trans, root, 0);
1156         BUG_ON(ret);
1157
1158         spin_lock(&cur_trans->commit_lock);
1159         if (cur_trans->in_commit) {
1160                 spin_unlock(&cur_trans->commit_lock);
1161                 atomic_inc(&cur_trans->use_count);
1162                 btrfs_end_transaction(trans, root);
1163
1164                 wait_for_commit(root, cur_trans);
1165
1166                 put_transaction(cur_trans);
1167
1168                 return 0;
1169         }
1170
1171         trans->transaction->in_commit = 1;
1172         trans->transaction->blocked = 1;
1173         spin_unlock(&cur_trans->commit_lock);
1174         wake_up(&root->fs_info->transaction_blocked_wait);
1175
1176         spin_lock(&root->fs_info->trans_lock);
1177         if (cur_trans->list.prev != &root->fs_info->trans_list) {
1178                 prev_trans = list_entry(cur_trans->list.prev,
1179                                         struct btrfs_transaction, list);
1180                 if (!prev_trans->commit_done) {
1181                         atomic_inc(&prev_trans->use_count);
1182                         spin_unlock(&root->fs_info->trans_lock);
1183
1184                         wait_for_commit(root, prev_trans);
1185
1186                         put_transaction(prev_trans);
1187                 } else {
1188                         spin_unlock(&root->fs_info->trans_lock);
1189                 }
1190         } else {
1191                 spin_unlock(&root->fs_info->trans_lock);
1192         }
1193
1194         if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
1195                 should_grow = 1;
1196
1197         do {
1198                 int snap_pending = 0;
1199
1200                 joined = cur_trans->num_joined;
1201                 if (!list_empty(&trans->transaction->pending_snapshots))
1202                         snap_pending = 1;
1203
1204                 WARN_ON(cur_trans != trans->transaction);
1205
1206                 if (flush_on_commit || snap_pending) {
1207                         btrfs_start_delalloc_inodes(root, 1);
1208                         ret = btrfs_wait_ordered_extents(root, 0, 1);
1209                         BUG_ON(ret);
1210                 }
1211
1212                 ret = btrfs_run_delayed_items(trans, root);
1213                 BUG_ON(ret);
1214
1215                 /*
1216                  * rename doesn't use btrfs_join_transaction, so, once we
1217                  * set the transaction to blocked above, we aren't going
1218                  * to get any new ordered operations.  We can safely run
1219                  * it here and know for sure that nothing new will be added
1220                  * to the list
1221                  */
1222                 btrfs_run_ordered_operations(root, 1);
1223
1224                 prepare_to_wait(&cur_trans->writer_wait, &wait,
1225                                 TASK_UNINTERRUPTIBLE);
1226
1227                 if (atomic_read(&cur_trans->num_writers) > 1)
1228                         schedule_timeout(MAX_SCHEDULE_TIMEOUT);
1229                 else if (should_grow)
1230                         schedule_timeout(1);
1231
1232                 finish_wait(&cur_trans->writer_wait, &wait);
1233         } while (atomic_read(&cur_trans->num_writers) > 1 ||
1234                  (should_grow && cur_trans->num_joined != joined));
1235
1236         /*
1237          * Ok now we need to make sure to block out any other joins while we
1238          * commit the transaction.  We could have started a join before setting
1239          * no_join so make sure to wait for num_writers to == 1 again.
1240          */
1241         spin_lock(&root->fs_info->trans_lock);
1242         root->fs_info->trans_no_join = 1;
1243         spin_unlock(&root->fs_info->trans_lock);
1244         wait_event(cur_trans->writer_wait,
1245                    atomic_read(&cur_trans->num_writers) == 1);
1246
1247         /*
1248          * the reloc mutex makes sure that we stop
1249          * the balancing code from coming in and moving
1250          * extents around in the middle of the commit
1251          */
1252         mutex_lock(&root->fs_info->reloc_mutex);
1253
1254         ret = btrfs_run_delayed_items(trans, root);
1255         BUG_ON(ret);
1256
1257         ret = create_pending_snapshots(trans, root->fs_info);
1258         BUG_ON(ret);
1259
1260         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1261         BUG_ON(ret);
1262
1263         /*
1264          * make sure none of the code above managed to slip in a
1265          * delayed item
1266          */
1267         btrfs_assert_delayed_root_empty(root);
1268
1269         WARN_ON(cur_trans != trans->transaction);
1270
1271         btrfs_scrub_pause(root);
1272         /* commit_fs_roots and commit_cowonly_roots are responsible for getting the
1273          * various roots consistent with each other.  Every pointer
1274          * in the tree of tree roots has to point to the most up to date
1275          * root for every subvolume and other tree.  So, we have to keep
1276          * the tree logging code from jumping in and changing any
1277          * of the trees.
1278          *
1279          * At this point in the commit, there can't be any tree-log
1280          * writers, but a little lower down we drop the trans mutex
1281          * and let new people in.  By holding the tree_log_mutex
1282          * from now until after the super is written, we avoid races
1283          * with the tree-log code.
1284          */
1285         mutex_lock(&root->fs_info->tree_log_mutex);
1286
1287         ret = commit_fs_roots(trans, root);
1288         BUG_ON(ret);
1289
1290         /* commit_fs_roots gets rid of all the tree log roots, it is now
1291          * safe to free the root of tree log roots
1292          */
1293         btrfs_free_log_root_tree(trans, root->fs_info);
1294
1295         ret = commit_cowonly_roots(trans, root);
1296         BUG_ON(ret);
1297
1298         btrfs_prepare_extent_commit(trans, root);
1299
1300         cur_trans = root->fs_info->running_transaction;
1301
1302         btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1303                             root->fs_info->tree_root->node);
1304         switch_commit_root(root->fs_info->tree_root);
1305
1306         btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
1307                             root->fs_info->chunk_root->node);
1308         switch_commit_root(root->fs_info->chunk_root);
1309
1310         update_super_roots(root);
1311
1312         if (!root->fs_info->log_root_recovering) {
1313                 btrfs_set_super_log_root(root->fs_info->super_copy, 0);
1314                 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
1315         }
1316
1317         memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
1318                sizeof(*root->fs_info->super_copy));
1319
1320         trans->transaction->blocked = 0;
1321         spin_lock(&root->fs_info->trans_lock);
1322         root->fs_info->running_transaction = NULL;
1323         root->fs_info->trans_no_join = 0;
1324         spin_unlock(&root->fs_info->trans_lock);
1325         mutex_unlock(&root->fs_info->reloc_mutex);
1326
1327         wake_up(&root->fs_info->transaction_wait);
1328
1329         ret = btrfs_write_and_wait_transaction(trans, root);
1330         BUG_ON(ret);
1331         write_ctree_super(trans, root, 0);
1332
1333         /*
1334          * the super is written, we can safely allow the tree-loggers
1335          * to go about their business
1336          */
1337         mutex_unlock(&root->fs_info->tree_log_mutex);
1338
1339         btrfs_finish_extent_commit(trans, root);
1340
1341         cur_trans->commit_done = 1;
1342
1343         root->fs_info->last_trans_committed = cur_trans->transid;
1344
1345         wake_up(&cur_trans->commit_wait);
1346
1347         spin_lock(&root->fs_info->trans_lock);
1348         list_del_init(&cur_trans->list);
1349         spin_unlock(&root->fs_info->trans_lock);
1350
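        /* one put for the list ref, one for the ref taken in join_transaction */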
1351         put_transaction(cur_trans);
1352         put_transaction(cur_trans);
1353
1354         trace_btrfs_transaction_commit(root);
1355
1356         btrfs_scrub_continue(root);
1357
1358         if (current->journal_info == trans)
1359                 current->journal_info = NULL;
1360
1361         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1362
1363         if (current != root->fs_info->transaction_kthread)
1364                 btrfs_run_delayed_iputs(root);
1365
1366         return ret;
1367 }
1368
1369 /*
1370  * interface function to delete all the snapshots we have scheduled for deletion
1371  */
1372 int btrfs_clean_old_snapshots(struct btrfs_root *root)
1373 {
1374         LIST_HEAD(list);
1375         struct btrfs_fs_info *fs_info = root->fs_info;
1376
1377         spin_lock(&fs_info->trans_lock);
1378         list_splice_init(&fs_info->dead_roots, &list);
1379         spin_unlock(&fs_info->trans_lock);
1380
1381         while (!list_empty(&list)) {
1382                 root = list_entry(list.next, struct btrfs_root, root_list);
1383                 list_del(&root->root_list);
1384
1385                 btrfs_kill_all_delayed_nodes(root);
1386
1387                 if (btrfs_header_backref_rev(root->node) <
1388                     BTRFS_MIXED_BACKREF_REV)
1389                         btrfs_drop_snapshot(root, NULL, 0);
1390                 else
1391                         btrfs_drop_snapshot(root, NULL, 1);
1392         }
1393         return 0;
1394 }