/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "ref-cache.h"

static int total_trans = 0;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;

#define BTRFS_ROOT_TRANS_TAG 0

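/*
 * A dirty_root is either a frozen copy of a subvolume root made when the
 * root was first changed in a transaction, or a dead root queued for
 * dropping.  latest_root points at the live root and ref_tree holds the
 * leaf reference cache built for this generation.
 */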
struct dirty_root {
        struct list_head list;
        struct btrfs_root *root;
        struct btrfs_root *latest_root;
        struct btrfs_leaf_ref_tree ref_tree;
};

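/*
 * Drop one reference on a transaction.  When the last reference goes away
 * the transaction is unlinked from fs_info->trans_list and freed back to
 * btrfs_transaction_cachep.  Called with trans_mutex held.
 */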
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(transaction->use_count == 0);
        transaction->use_count--;
        if (transaction->use_count == 0) {
                WARN_ON(total_trans == 0);
                total_trans--;
                list_del_init(&transaction->list);
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}

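/*
 * Join the currently running transaction, or start a new one if none is
 * running.  Starting a new transaction bumps the filesystem generation,
 * resets the allocation hints and installs the transaction as
 * fs_info->running_transaction.  Called with trans_mutex held.
 */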
static noinline int join_transaction(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;
        cur_trans = root->fs_info->running_transaction;
        if (!cur_trans) {
                cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
                                             GFP_NOFS);
                total_trans++;
                BUG_ON(!cur_trans);
                root->fs_info->generation++;
                root->fs_info->last_alloc = 0;
                root->fs_info->last_data_alloc = 0;
                cur_trans->num_writers = 1;
                cur_trans->num_joined = 0;
                cur_trans->transid = root->fs_info->generation;
                init_waitqueue_head(&cur_trans->writer_wait);
                init_waitqueue_head(&cur_trans->commit_wait);
                cur_trans->in_commit = 0;
                cur_trans->blocked = 0;
                cur_trans->use_count = 1;
                cur_trans->commit_done = 0;
                cur_trans->start_time = get_seconds();
                INIT_LIST_HEAD(&cur_trans->pending_snapshots);
                list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
                extent_io_tree_init(&cur_trans->dirty_pages,
                                     root->fs_info->btree_inode->i_mapping,
                                     GFP_NOFS);
                spin_lock(&root->fs_info->new_trans_lock);
                root->fs_info->running_transaction = cur_trans;
                spin_unlock(&root->fs_info->new_trans_lock);
        } else {
                cur_trans->num_writers++;
                cur_trans->num_joined++;
        }

        return 0;
}

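/*
 * Make sure a copy-on-write (ref_cows) root is recorded in the current
 * transaction.  The first time a root is touched in a transaction it is
 * tagged in fs_roots_radix and a dirty_root copy pointing at the old
 * commit root is set up, along with a fresh leaf reference tree for this
 * generation.
 */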
static noinline int record_root_in_trans(struct btrfs_root *root)
{
        struct dirty_root *dirty;
        u64 running_trans_id = root->fs_info->running_transaction->transid;
        if (root->ref_cows && root->last_trans < running_trans_id) {
                WARN_ON(root == root->fs_info->extent_root);
                if (root->root_item.refs != 0) {
                        radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_TRANS_TAG);

                        dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
                        BUG_ON(!dirty);
                        dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS);
                        BUG_ON(!dirty->root);

                        dirty->latest_root = root;
                        INIT_LIST_HEAD(&dirty->list);
                        btrfs_leaf_ref_tree_init(&dirty->ref_tree);
                        dirty->ref_tree.generation = running_trans_id;

                        root->commit_root = btrfs_root_node(root);
                        root->ref_tree = &dirty->ref_tree;

                        memcpy(dirty->root, root, sizeof(*root));
                        spin_lock_init(&dirty->root->node_lock);
                        mutex_init(&dirty->root->objectid_mutex);
                        dirty->root->node = root->commit_root;
                        dirty->root->commit_root = NULL;
                } else {
                        WARN_ON(1);
                }
                root->last_trans = running_trans_id;
        }
        return 0;
}

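/*
 * Common helper for starting or joining a transaction.  Unless the caller
 * asked to join, it waits while the running transaction is blocked in
 * commit, then joins it, records the root and returns a handle with
 * num_blocks reserved.
 */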
struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
                                             int num_blocks, int join)
{
        struct btrfs_trans_handle *h =
                kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        struct btrfs_transaction *cur_trans;
        int ret;

        mutex_lock(&root->fs_info->trans_mutex);
        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && cur_trans->blocked && !join) {
                DEFINE_WAIT(wait);
                cur_trans->use_count++;
                while(1) {
                        prepare_to_wait(&root->fs_info->transaction_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (cur_trans->blocked) {
                                mutex_unlock(&root->fs_info->trans_mutex);
                                schedule();
                                mutex_lock(&root->fs_info->trans_mutex);
                                finish_wait(&root->fs_info->transaction_wait,
                                            &wait);
                        } else {
                                finish_wait(&root->fs_info->transaction_wait,
                                            &wait);
                                break;
                        }
                }
                put_transaction(cur_trans);
        }
        ret = join_transaction(root);
        BUG_ON(ret);

        record_root_in_trans(root);
        h->transid = root->fs_info->running_transaction->transid;
        h->transaction = root->fs_info->running_transaction;
        h->blocks_reserved = num_blocks;
        h->blocks_used = 0;
        h->block_group = NULL;
        h->alloc_exclude_nr = 0;
        h->alloc_exclude_start = 0;
        root->fs_info->running_transaction->use_count++;
        mutex_unlock(&root->fs_info->trans_mutex);
        return h;
}

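/* convenience wrappers around start_transaction() */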
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_blocks)
{
        return start_transaction(root, num_blocks, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
                                                   int num_blocks)
{
        return start_transaction(root, num_blocks, 1);
}

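/*
 * Sleep until the given transaction has fully committed (commit_done is
 * set), dropping trans_mutex while waiting.
 */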
static noinline int wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
{
        DEFINE_WAIT(wait);
        mutex_lock(&root->fs_info->trans_mutex);
        while(!commit->commit_done) {
                prepare_to_wait(&commit->commit_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (commit->commit_done)
                        break;
                mutex_unlock(&root->fs_info->trans_mutex);
                schedule();
                mutex_lock(&root->fs_info->trans_mutex);
        }
        mutex_unlock(&root->fs_info->trans_mutex);
        finish_wait(&commit->commit_wait, &wait);
        return 0;
}

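/*
 * Drop a trans handle: decrement the writer count, wake anyone waiting on
 * the writers and release our transaction reference.  The throttle branch
 * that would stall callers while a commit is in progress is currently
 * compiled out by the "if (0 && ...)" guard.
 */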
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, int throttle)
{
        struct btrfs_transaction *cur_trans;

        mutex_lock(&root->fs_info->trans_mutex);
        cur_trans = root->fs_info->running_transaction;
        WARN_ON(cur_trans != trans->transaction);
        WARN_ON(cur_trans->num_writers < 1);
        cur_trans->num_writers--;

        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);

        if (0 && cur_trans->in_commit && throttle) {
                DEFINE_WAIT(wait);
                mutex_unlock(&root->fs_info->trans_mutex);
                prepare_to_wait(&root->fs_info->transaction_throttle, &wait,
                                TASK_UNINTERRUPTIBLE);
                schedule();
                finish_wait(&root->fs_info->transaction_throttle, &wait);
                mutex_lock(&root->fs_info->trans_mutex);
        }

        put_transaction(cur_trans);
        mutex_unlock(&root->fs_info->trans_mutex);
        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1);
}

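/*
 * Write back every btree page dirtied by this transaction and wait for the
 * IO to finish.  Dirty ranges are tracked in the transaction's dirty_pages
 * extent_io_tree; without a transaction this falls back to flushing the
 * whole btree inode.
 */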
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        int ret;
        int err;
        int werr = 0;
        struct extent_io_tree *dirty_pages;
        struct page *page;
        struct inode *btree_inode = root->fs_info->btree_inode;
        u64 start;
        u64 end;
        unsigned long index;

        if (!trans || !trans->transaction) {
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        dirty_pages = &trans->transaction->dirty_pages;
        while(1) {
                ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;
                clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
                while(start <= end) {
                        index = start >> PAGE_CACHE_SHIFT;
                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
                        page = find_lock_page(btree_inode->i_mapping, index);
                        if (!page)
                                continue;
                        if (PageWriteback(page)) {
                                if (PageDirty(page))
                                        wait_on_page_writeback(page);
                                else {
                                        unlock_page(page);
                                        page_cache_release(page);
                                        continue;
                                }
                        }
                        err = write_one_page(page, 0);
                        if (err)
                                werr = err;
                        page_cache_release(page);
                }
        }
        err = filemap_fdatawait(btree_inode->i_mapping);
        if (err)
                werr = err;
        return werr;
}

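/*
 * Push the new root block of a COW-only tree into its root item, looping
 * because writing dirty block groups can itself relocate the root node.
 */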
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        struct btrfs_root *tree_root = root->fs_info->tree_root;

        btrfs_write_dirty_block_groups(trans, root);
        while(1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start)
                        break;
                btrfs_set_root_bytenr(&root->root_item,
                                       root->node->start);
                btrfs_set_root_level(&root->root_item,
                                     btrfs_header_level(root->node));
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                BUG_ON(ret);
                btrfs_write_dirty_block_groups(trans, root);
        }
        return 0;
}

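/*
 * Update the root items of all trees on the dirty_cowonly_roots list.
 */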
int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *next;

        while(!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);
                update_cowonly_root(trans, root);
        }
        return 0;
}

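/*
 * Queue a root on the dead list so its blocks can be dropped later.
 */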
int btrfs_add_dead_root(struct btrfs_root *root,
                        struct btrfs_root *latest,
                        struct list_head *dead_list)
{
        struct dirty_root *dirty;

        dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
        if (!dirty)
                return -ENOMEM;
        btrfs_leaf_ref_tree_init(&dirty->ref_tree);
        dirty->root = root;
        dirty->latest_root = latest;
        root->ref_tree = NULL;
        list_add(&dirty->list, dead_list);
        return 0;
}

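/*
 * Walk every root tagged in the radix tree by record_root_in_trans().  If
 * the root was never cowed this transaction its old copy is freed and the
 * root item is simply rewritten; otherwise the new root node is inserted
 * as a fresh root item and the frozen copy is queued on "list" so its old
 * blocks can be dropped once it has no other references.
 */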
static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
                                    struct radix_tree_root *radix,
                                    struct list_head *list)
{
        struct dirty_root *dirty;
        struct btrfs_root *gang[8];
        struct btrfs_root *root;
        int i;
        int ret;
        int err = 0;
        u32 refs;

        while(1) {
                ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        radix_tree_tag_clear(radix,
                                     (unsigned long)root->root_key.objectid,
                                     BTRFS_ROOT_TRANS_TAG);

                        BUG_ON(!root->ref_tree);
                        dirty = container_of(root->ref_tree, struct dirty_root,
                                             ref_tree);

                        if (root->commit_root == root->node) {
                                WARN_ON(root->node->start !=
                                        btrfs_root_bytenr(&root->root_item));

                                BUG_ON(!btrfs_leaf_ref_tree_empty(
                                                        root->ref_tree));
                                free_extent_buffer(root->commit_root);
                                root->commit_root = NULL;
                                root->ref_tree = NULL;

                                kfree(dirty->root);
                                kfree(dirty);

                                /* make sure to update the root on disk
                                 * so we get any updates to the block used
                                 * counts
                                 */
                                err = btrfs_update_root(trans,
                                                root->fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                                continue;
                        }

                        memset(&root->root_item.drop_progress, 0,
                               sizeof(struct btrfs_disk_key));
                        root->root_item.drop_level = 0;
                        root->commit_root = NULL;
                        root->ref_tree = NULL;
                        root->root_key.offset = root->fs_info->generation;
                        btrfs_set_root_bytenr(&root->root_item,
                                              root->node->start);
                        btrfs_set_root_level(&root->root_item,
                                             btrfs_header_level(root->node));
                        err = btrfs_insert_root(trans, root->fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        if (err)
                                break;

                        refs = btrfs_root_refs(&dirty->root->root_item);
                        btrfs_set_root_refs(&dirty->root->root_item, refs - 1);
                        err = btrfs_update_root(trans, root->fs_info->tree_root,
                                                &dirty->root->root_key,
                                                &dirty->root->root_item);

                        BUG_ON(err);
                        if (refs == 1) {
                                list_add(&dirty->list, list);
                        } else {
                                WARN_ON(1);
                                free_extent_buffer(dirty->root->node);
                                kfree(dirty->root);
                                kfree(dirty);
                        }
                }
        }
        return err;
}

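/*
 * Defragment the btree leaves of a root, restarting the transaction
 * between passes so other writers are not starved.
 */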
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
        struct btrfs_fs_info *info = root->fs_info;
        int ret;
        struct btrfs_trans_handle *trans;
        unsigned long nr;

        smp_mb();
        if (root->defrag_running)
                return 0;
        trans = btrfs_start_transaction(root, 1);
        while (1) {
                root->defrag_running = 1;
                ret = btrfs_defrag_leaves(trans, root, cacheonly);
                nr = trans->blocks_used;
                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root, nr);
                cond_resched();

                trans = btrfs_start_transaction(root, 1);
                if (root->fs_info->closing || ret != -EAGAIN)
                        break;
        }
        root->defrag_running = 0;
        smp_mb();
        btrfs_end_transaction(trans, root);
        return 0;
}

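/*
 * Free the blocks of every snapshot copy on the dead/dirty list.  Each one
 * is dropped a chunk at a time (btrfs_drop_snapshot returns -EAGAIN until
 * it is done), the freed byte counts are folded back into the latest root
 * and the old root item is finally deleted.
 */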
static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
                                     struct list_head *list)
{
        struct dirty_root *dirty;
        struct btrfs_trans_handle *trans;
        unsigned long nr;
        u64 num_bytes;
        u64 bytes_used;
        int ret = 0;
        int err;

        while(!list_empty(list)) {
                struct btrfs_root *root;

                dirty = list_entry(list->next, struct dirty_root, list);
                list_del_init(&dirty->list);

                num_bytes = btrfs_root_used(&dirty->root->root_item);
                root = dirty->latest_root;
                atomic_inc(&root->fs_info->throttles);

                mutex_lock(&root->fs_info->drop_mutex);
                while(1) {
                        trans = btrfs_start_transaction(tree_root, 1);
                        ret = btrfs_drop_snapshot(trans, dirty->root);
                        if (ret != -EAGAIN) {
                                break;
                        }

                        err = btrfs_update_root(trans,
                                        tree_root,
                                        &dirty->root->root_key,
                                        &dirty->root->root_item);
                        if (err)
                                ret = err;
                        nr = trans->blocks_used;
                        ret = btrfs_end_transaction_throttle(trans, tree_root);
                        BUG_ON(ret);

                        mutex_unlock(&root->fs_info->drop_mutex);
                        btrfs_btree_balance_dirty(tree_root, nr);
                        cond_resched();
                        mutex_lock(&root->fs_info->drop_mutex);
                }
                BUG_ON(ret);
                atomic_dec(&root->fs_info->throttles);

                mutex_lock(&root->fs_info->alloc_mutex);
                num_bytes -= btrfs_root_used(&dirty->root->root_item);
                bytes_used = btrfs_root_used(&root->root_item);
                if (num_bytes) {
                        record_root_in_trans(root);
                        btrfs_set_root_used(&root->root_item,
                                            bytes_used - num_bytes);
                }
                mutex_unlock(&root->fs_info->alloc_mutex);

                ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
                if (ret) {
                        BUG();
                        break;
                }
                mutex_unlock(&root->fs_info->drop_mutex);

                nr = trans->blocks_used;
                ret = btrfs_end_transaction(trans, tree_root);
                BUG_ON(ret);

                btrfs_remove_leaf_refs(dirty->root);

                free_extent_buffer(dirty->root->node);
                kfree(dirty->root);
                kfree(dirty);

                btrfs_btree_balance_dirty(tree_root, nr);
                cond_resched();
        }
        return ret;
}

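/*
 * Create one pending snapshot: allocate an objectid, COW and copy the
 * source root node, insert the new root item, then link the snapshot in
 * with a directory item and inode ref under the superblock's root
 * directory.
 */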
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        int ret;
        int namelen;
        u64 objectid;

        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                ret = -ENOMEM;
                goto fail;
        }
        ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
        if (ret)
                goto fail;

        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

        key.objectid = objectid;
        key.offset = 1;
        btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

        old = btrfs_lock_root_node(root);
        btrfs_cow_block(trans, root, old, NULL, 0, &old);

        btrfs_copy_root(trans, root, old, &tmp, objectid);
        btrfs_tree_unlock(old);
        free_extent_buffer(old);

        btrfs_set_root_bytenr(new_root_item, tmp->start);
        btrfs_set_root_level(new_root_item, btrfs_header_level(tmp));
        ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
                                new_root_item);
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        if (ret)
                goto fail;

        /*
         * insert the directory item
         */
        key.offset = (u64)-1;
        namelen = strlen(pending->name);
        ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
                                    pending->name, namelen,
                                    root->fs_info->sb->s_root->d_inode->i_ino,
                                    &key, BTRFS_FT_DIR, 0);

        if (ret)
                goto fail;

        ret = btrfs_insert_inode_ref(trans, root->fs_info->tree_root,
                             pending->name, strlen(pending->name), objectid,
                             root->fs_info->sb->s_root->d_inode->i_ino, 0);

        /* Invalidate existing dcache entry for new snapshot. */
        btrfs_invalidate_dcache_root(root, pending->name, namelen);

fail:
        kfree(new_root_item);
        return ret;
}

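/*
 * Create all snapshots queued on the transaction's pending_snapshots list.
 */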
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending;
        struct list_head *head = &trans->transaction->pending_snapshots;
        int ret;

        while(!list_empty(head)) {
                pending = list_entry(head->next,
                                     struct btrfs_pending_snapshot, list);
                ret = create_pending_snapshot(trans, fs_info, pending);
                BUG_ON(ret);
                list_del(&pending->list);
                kfree(pending->name);
                kfree(pending);
        }
        return 0;
}

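/*
 * Commit the running transaction: block new writers, wait for the current
 * ones to finish, create pending snapshots, write out dirty fs roots and
 * tree roots, point the super block at the new trees, flush the dirty
 * btree pages and finally write the super block.  Dead roots collected on
 * the way are queued, or dropped directly if the filesystem is closing.
 */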
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root)
{
        unsigned long joined = 0;
        unsigned long timeout = 1;
        struct btrfs_transaction *cur_trans;
        struct btrfs_transaction *prev_trans = NULL;
        struct btrfs_root *chunk_root = root->fs_info->chunk_root;
        struct list_head dirty_fs_roots;
        struct extent_io_tree *pinned_copy;
        DEFINE_WAIT(wait);
        int ret;

        INIT_LIST_HEAD(&dirty_fs_roots);

        mutex_lock(&root->fs_info->trans_mutex);
        if (trans->transaction->in_commit) {
                cur_trans = trans->transaction;
                trans->transaction->use_count++;
                mutex_unlock(&root->fs_info->trans_mutex);
                btrfs_end_transaction(trans, root);

                ret = wait_for_commit(root, cur_trans);
                BUG_ON(ret);

                mutex_lock(&root->fs_info->trans_mutex);
                put_transaction(cur_trans);
                mutex_unlock(&root->fs_info->trans_mutex);

                return 0;
        }

        pinned_copy = kmalloc(sizeof(*pinned_copy), GFP_NOFS);
        if (!pinned_copy)
                return -ENOMEM;

        extent_io_tree_init(pinned_copy,
                             root->fs_info->btree_inode->i_mapping, GFP_NOFS);

        trans->transaction->in_commit = 1;
        trans->transaction->blocked = 1;
        cur_trans = trans->transaction;
        if (cur_trans->list.prev != &root->fs_info->trans_list) {
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (!prev_trans->commit_done) {
                        prev_trans->use_count++;
                        mutex_unlock(&root->fs_info->trans_mutex);

                        wait_for_commit(root, prev_trans);

                        mutex_lock(&root->fs_info->trans_mutex);
                        put_transaction(prev_trans);
                }
        }

        do {
                joined = cur_trans->num_joined;
                WARN_ON(cur_trans != trans->transaction);
                prepare_to_wait(&cur_trans->writer_wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                if (cur_trans->num_writers > 1)
                        timeout = MAX_SCHEDULE_TIMEOUT;
                else
                        timeout = 1;

                mutex_unlock(&root->fs_info->trans_mutex);

                schedule_timeout(timeout);

                mutex_lock(&root->fs_info->trans_mutex);
                finish_wait(&cur_trans->writer_wait, &wait);
        } while (cur_trans->num_writers > 1 ||
                 (cur_trans->num_joined != joined));

        ret = create_pending_snapshots(trans, root->fs_info);
        BUG_ON(ret);

        WARN_ON(cur_trans != trans->transaction);

        ret = add_dirty_roots(trans, &root->fs_info->fs_roots_radix,
                              &dirty_fs_roots);
        BUG_ON(ret);

        spin_lock(&root->fs_info->ref_cache_lock);
        root->fs_info->running_ref_cache_size = 0;
        spin_unlock(&root->fs_info->ref_cache_lock);

        ret = btrfs_commit_tree_roots(trans, root);
        BUG_ON(ret);

        cur_trans = root->fs_info->running_transaction;
        spin_lock(&root->fs_info->new_trans_lock);
        root->fs_info->running_transaction = NULL;
        spin_unlock(&root->fs_info->new_trans_lock);
        btrfs_set_super_generation(&root->fs_info->super_copy,
                                   cur_trans->transid);
        btrfs_set_super_root(&root->fs_info->super_copy,
                             root->fs_info->tree_root->node->start);
        btrfs_set_super_root_level(&root->fs_info->super_copy,
                           btrfs_header_level(root->fs_info->tree_root->node));

        btrfs_set_super_chunk_root(&root->fs_info->super_copy,
                                   chunk_root->node->start);
        btrfs_set_super_chunk_root_level(&root->fs_info->super_copy,
                                         btrfs_header_level(chunk_root->node));
        memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
               sizeof(root->fs_info->super_copy));

        btrfs_copy_pinned(root, pinned_copy);

        trans->transaction->blocked = 0;
        wake_up(&root->fs_info->transaction_throttle);
        wake_up(&root->fs_info->transaction_wait);

        mutex_unlock(&root->fs_info->trans_mutex);
        ret = btrfs_write_and_wait_transaction(trans, root);
        BUG_ON(ret);
        write_ctree_super(trans, root);

        btrfs_finish_extent_commit(trans, root, pinned_copy);
        mutex_lock(&root->fs_info->trans_mutex);

        kfree(pinned_copy);

        cur_trans->commit_done = 1;
        root->fs_info->last_trans_committed = cur_trans->transid;
        wake_up(&cur_trans->commit_wait);
        put_transaction(cur_trans);
        put_transaction(cur_trans);

        if (root->fs_info->closing)
                list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);
        else
                list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);

        mutex_unlock(&root->fs_info->trans_mutex);
        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        if (root->fs_info->closing) {
                drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
        }
        return ret;
}

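/*
 * Drop any roots that previous commits have queued on fs_info->dead_roots.
 */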
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
        struct list_head dirty_roots;
        INIT_LIST_HEAD(&dirty_roots);
again:
        mutex_lock(&root->fs_info->trans_mutex);
        list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
        mutex_unlock(&root->fs_info->trans_mutex);

        if (!list_empty(&dirty_roots)) {
                drop_dirty_roots(root, &dirty_roots);
                goto again;
        }
        return 0;
}