/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"

static int total_trans = 0;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;

#define BTRFS_ROOT_TRANS_TAG 0

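/*
 * Drop one reference on a transaction.  When the last reference goes
 * away the transaction is removed from fs_info->trans_list and freed.
 * Callers in this file hold fs_info->trans_mutex around this.
 */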
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(transaction->use_count == 0);
        transaction->use_count--;
        if (transaction->use_count == 0) {
                WARN_ON(total_trans == 0);
                total_trans--;
                list_del_init(&transaction->list);
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}

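/*
 * Join the currently running transaction, or allocate and initialize a
 * new one if none is running.  Called with fs_info->trans_mutex held.
 */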
static noinline int join_transaction(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;
        cur_trans = root->fs_info->running_transaction;
        if (!cur_trans) {
                cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
                                             GFP_NOFS);
                total_trans++;
                BUG_ON(!cur_trans);
                root->fs_info->generation++;
                root->fs_info->last_alloc = 0;
                root->fs_info->last_data_alloc = 0;
                cur_trans->num_writers = 1;
                cur_trans->num_joined = 0;
                cur_trans->transid = root->fs_info->generation;
                init_waitqueue_head(&cur_trans->writer_wait);
                init_waitqueue_head(&cur_trans->commit_wait);
                cur_trans->in_commit = 0;
                cur_trans->blocked = 0;
                cur_trans->use_count = 1;
                cur_trans->commit_done = 0;
                cur_trans->start_time = get_seconds();
                INIT_LIST_HEAD(&cur_trans->pending_snapshots);
                list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
                extent_io_tree_init(&cur_trans->dirty_pages,
                                     root->fs_info->btree_inode->i_mapping,
                                     GFP_NOFS);
                spin_lock(&root->fs_info->new_trans_lock);
                root->fs_info->running_transaction = cur_trans;
                spin_unlock(&root->fs_info->new_trans_lock);
        } else {
                cur_trans->num_writers++;
                cur_trans->num_joined++;
        }

        return 0;
}

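/*
 * Record that a copy-on-write root is being modified in the running
 * transaction: tag it in fs_roots_radix so the commit picks it up, and
 * remember the current root node as the commit root.
 */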
static noinline int record_root_in_trans(struct btrfs_root *root)
{
        u64 running_trans_id = root->fs_info->running_transaction->transid;
        if (root->ref_cows && root->last_trans < running_trans_id) {
                WARN_ON(root == root->fs_info->extent_root);
                if (root->root_item.refs != 0) {
                        radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_TRANS_TAG);
                        root->commit_root = btrfs_root_node(root);
                } else {
                        WARN_ON(1);
                }
                root->last_trans = running_trans_id;
        }
        return 0;
}

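/*
 * Common helper for starting or joining a transaction.  If the running
 * transaction is blocked because a commit is in progress and the caller
 * did not ask to join, wait on fs_info->transaction_wait until the
 * commit unblocks new writers.
 */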
struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
                                             int num_blocks, int join)
{
        struct btrfs_trans_handle *h =
                kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        struct btrfs_transaction *cur_trans;
        int ret;

        mutex_lock(&root->fs_info->trans_mutex);
        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && cur_trans->blocked && !join) {
                DEFINE_WAIT(wait);
                cur_trans->use_count++;
                while(1) {
                        prepare_to_wait(&root->fs_info->transaction_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (cur_trans->blocked) {
                                mutex_unlock(&root->fs_info->trans_mutex);
                                schedule();
                                mutex_lock(&root->fs_info->trans_mutex);
                                finish_wait(&root->fs_info->transaction_wait,
                                            &wait);
                        } else {
                                finish_wait(&root->fs_info->transaction_wait,
                                            &wait);
                                break;
                        }
                }
                put_transaction(cur_trans);
        }
        ret = join_transaction(root);
        BUG_ON(ret);

        record_root_in_trans(root);
        h->transid = root->fs_info->running_transaction->transid;
        h->transaction = root->fs_info->running_transaction;
        h->blocks_reserved = num_blocks;
        h->blocks_used = 0;
        h->block_group = NULL;
        h->alloc_exclude_nr = 0;
        h->alloc_exclude_start = 0;
        root->fs_info->running_transaction->use_count++;
        mutex_unlock(&root->fs_info->trans_mutex);
        return h;
}

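/*
 * btrfs_start_transaction waits for any commit in progress to unblock
 * before joining; btrfs_join_transaction joins the running transaction
 * even while it is blocked for commit.
 */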
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_blocks)
{
        return start_transaction(root, num_blocks, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
                                                   int num_blocks)
{
        return start_transaction(root, num_blocks, 1);
}

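/*
 * Sleep until the given transaction has finished committing.  The
 * trans_mutex is dropped around each schedule() and retaken afterwards.
 */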
static noinline int wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
{
        DEFINE_WAIT(wait);
        mutex_lock(&root->fs_info->trans_mutex);
        while(!commit->commit_done) {
                prepare_to_wait(&commit->commit_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (commit->commit_done)
                        break;
                mutex_unlock(&root->fs_info->trans_mutex);
                schedule();
                mutex_lock(&root->fs_info->trans_mutex);
        }
        mutex_unlock(&root->fs_info->trans_mutex);
        finish_wait(&commit->commit_wait, &wait);
        return 0;
}

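/*
 * Drop one writer from the transaction and wake anyone waiting for the
 * writer count to drain.  The throttling branch below is currently
 * compiled out by the "0 &&" in its condition.
 */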
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, int throttle)
{
        struct btrfs_transaction *cur_trans;

        mutex_lock(&root->fs_info->trans_mutex);
        cur_trans = root->fs_info->running_transaction;
        WARN_ON(cur_trans != trans->transaction);
        WARN_ON(cur_trans->num_writers < 1);
        cur_trans->num_writers--;

        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);

        if (0 && cur_trans->in_commit && throttle) {
                DEFINE_WAIT(wait);
                mutex_unlock(&root->fs_info->trans_mutex);
                prepare_to_wait(&root->fs_info->transaction_throttle, &wait,
                                TASK_UNINTERRUPTIBLE);
                schedule();
                finish_wait(&root->fs_info->transaction_throttle, &wait);
                mutex_lock(&root->fs_info->trans_mutex);
        }

        put_transaction(cur_trans);
        mutex_unlock(&root->fs_info->trans_mutex);
        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return 0;
}

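/* Wrappers that end a transaction with or without commit throttling. */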
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1);
}

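/*
 * Write out every btree page recorded in this transaction's dirty_pages
 * extent tree and wait for the btree inode's writeback to finish.
 * Returns the last write error seen, if any.
 */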
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        int ret;
        int err;
        int werr = 0;
        struct extent_io_tree *dirty_pages;
        struct page *page;
        struct inode *btree_inode = root->fs_info->btree_inode;
        u64 start;
        u64 end;
        unsigned long index;

        if (!trans || !trans->transaction) {
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        dirty_pages = &trans->transaction->dirty_pages;
        while(1) {
                ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;
                clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
                while(start <= end) {
                        index = start >> PAGE_CACHE_SHIFT;
                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
                        page = find_lock_page(btree_inode->i_mapping, index);
                        if (!page)
                                continue;
                        if (PageWriteback(page)) {
                                if (PageDirty(page))
                                        wait_on_page_writeback(page);
                                else {
                                        unlock_page(page);
                                        page_cache_release(page);
                                        continue;
                                }
                        }
                        err = write_one_page(page, 0);
                        if (err)
                                werr = err;
                        page_cache_release(page);
                }
        }
        err = filemap_fdatawait(btree_inode->i_mapping);
        if (err)
                werr = err;
        return werr;
}

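/*
 * Write dirty block groups and update the root item of a COW-only root,
 * looping until the root item points at the current root node.
 */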
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        struct btrfs_root *tree_root = root->fs_info->tree_root;

        btrfs_write_dirty_block_groups(trans, root);
        while(1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start)
                        break;
                btrfs_set_root_bytenr(&root->root_item,
                                       root->node->start);
                btrfs_set_root_level(&root->root_item,
                                     btrfs_header_level(root->node));
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                BUG_ON(ret);
                btrfs_write_dirty_block_groups(trans, root);
        }
        return 0;
}

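/*
 * Bring every root on fs_info->dirty_cowonly_roots up to date on disk.
 */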
int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *next;

        while(!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);
                update_cowonly_root(trans, root);
        }
        return 0;
}

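/*
 * Bookkeeping for a root whose blocks still need to be dropped after a
 * commit: the root (or stale copy of it) to drop, plus the latest live
 * version of that root.
 */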
struct dirty_root {
        struct list_head list;
        struct btrfs_root *root;
        struct btrfs_root *latest_root;
};

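/*
 * Queue a root on the given dead list so drop_dirty_roots() can reclaim
 * its blocks later.
 */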
int btrfs_add_dead_root(struct btrfs_root *root,
                        struct btrfs_root *latest,
                        struct list_head *dead_list)
{
        struct dirty_root *dirty;

        dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
        if (!dirty)
                return -ENOMEM;
        dirty->root = root;
        dirty->latest_root = latest;
        list_add(&dirty->list, dead_list);
        return 0;
}

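/*
 * For each fs root tagged as modified in this transaction, insert a new
 * root item for the current generation and queue the old copy of the
 * root on 'list' so its blocks can be dropped after the commit.  Roots
 * whose commit_root still matches the current node only have their root
 * item rewritten on disk.
 */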
static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
                                    struct radix_tree_root *radix,
                                    struct list_head *list)
{
        struct dirty_root *dirty;
        struct btrfs_root *gang[8];
        struct btrfs_root *root;
        int i;
        int ret;
        int err = 0;
        u32 refs;

        while(1) {
                ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        radix_tree_tag_clear(radix,
                                     (unsigned long)root->root_key.objectid,
                                     BTRFS_ROOT_TRANS_TAG);
                        if (root->commit_root == root->node) {
                                WARN_ON(root->node->start !=
                                        btrfs_root_bytenr(&root->root_item));
                                free_extent_buffer(root->commit_root);
                                root->commit_root = NULL;

                                /* make sure to update the root on disk
                                 * so we get any updates to the block used
                                 * counts
                                 */
                                err = btrfs_update_root(trans,
                                                root->fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                                continue;
                        }
                        dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
                        BUG_ON(!dirty);
                        dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS);
                        BUG_ON(!dirty->root);

                        memset(&root->root_item.drop_progress, 0,
                               sizeof(struct btrfs_disk_key));
                        root->root_item.drop_level = 0;

                        memcpy(dirty->root, root, sizeof(*root));
                        dirty->root->node = root->commit_root;
                        dirty->latest_root = root;
                        root->commit_root = NULL;

                        root->root_key.offset = root->fs_info->generation;
                        btrfs_set_root_bytenr(&root->root_item,
                                              root->node->start);
                        btrfs_set_root_level(&root->root_item,
                                             btrfs_header_level(root->node));
                        err = btrfs_insert_root(trans, root->fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        if (err)
                                break;

                        refs = btrfs_root_refs(&dirty->root->root_item);
                        btrfs_set_root_refs(&dirty->root->root_item, refs - 1);
                        err = btrfs_update_root(trans, root->fs_info->tree_root,
                                                &dirty->root->root_key,
                                                &dirty->root->root_item);

                        BUG_ON(err);
                        if (refs == 1) {
                                list_add(&dirty->list, list);
                        } else {
                                WARN_ON(1);
                                kfree(dirty->root);
                                kfree(dirty);
                        }
                }
        }
        return err;
}

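/*
 * Defragment the btree leaves of a root, ending and restarting the
 * transaction between passes and balancing dirty btree pages as it goes.
 */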
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
        struct btrfs_fs_info *info = root->fs_info;
        int ret;
        struct btrfs_trans_handle *trans;
        unsigned long nr;

        smp_mb();
        if (root->defrag_running)
                return 0;
        trans = btrfs_start_transaction(root, 1);
        while (1) {
                root->defrag_running = 1;
                ret = btrfs_defrag_leaves(trans, root, cacheonly);
                nr = trans->blocks_used;
                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root, nr);
                cond_resched();

                trans = btrfs_start_transaction(root, 1);
                if (root->fs_info->closing || ret != -EAGAIN)
                        break;
        }
        root->defrag_running = 0;
        smp_mb();
        btrfs_end_transaction(trans, root);
        return 0;
}

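/*
 * Reclaim the blocks of every dead root on 'list': call
 * btrfs_drop_snapshot() in a loop, recording drop progress in the root
 * item between transactions, then delete the root item itself.
 */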
static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
                                     struct list_head *list)
{
        struct dirty_root *dirty;
        struct btrfs_trans_handle *trans;
        unsigned long nr;
        u64 num_bytes;
        u64 bytes_used;
        int ret = 0;
        int err;

        while(!list_empty(list)) {
                struct btrfs_root *root;

                dirty = list_entry(list->next, struct dirty_root, list);
                list_del_init(&dirty->list);

                num_bytes = btrfs_root_used(&dirty->root->root_item);
                root = dirty->latest_root;
                atomic_inc(&root->fs_info->throttles);

                mutex_lock(&root->fs_info->drop_mutex);
                while(1) {
                        trans = btrfs_start_transaction(tree_root, 1);
                        ret = btrfs_drop_snapshot(trans, dirty->root);
                        if (ret != -EAGAIN) {
                                break;
                        }

                        err = btrfs_update_root(trans,
                                        tree_root,
                                        &dirty->root->root_key,
                                        &dirty->root->root_item);
                        if (err)
                                ret = err;
                        nr = trans->blocks_used;
                        ret = btrfs_end_transaction_throttle(trans, tree_root);
                        BUG_ON(ret);

                        mutex_unlock(&root->fs_info->drop_mutex);
                        btrfs_btree_balance_dirty(tree_root, nr);
                        cond_resched();
                        mutex_lock(&root->fs_info->drop_mutex);
                }
                BUG_ON(ret);
                atomic_dec(&root->fs_info->throttles);

                mutex_lock(&root->fs_info->alloc_mutex);
                num_bytes -= btrfs_root_used(&dirty->root->root_item);
                bytes_used = btrfs_root_used(&root->root_item);
                if (num_bytes) {
                        record_root_in_trans(root);
                        btrfs_set_root_used(&root->root_item,
                                            bytes_used - num_bytes);
                }
                mutex_unlock(&root->fs_info->alloc_mutex);

                ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
                if (ret) {
                        BUG();
                        break;
                }
                mutex_unlock(&root->fs_info->drop_mutex);

                nr = trans->blocks_used;
                ret = btrfs_end_transaction(trans, tree_root);
                BUG_ON(ret);

                free_extent_buffer(dirty->root->node);
                kfree(dirty->root);
                kfree(dirty);

                btrfs_btree_balance_dirty(tree_root, nr);
                cond_resched();
        }
        return ret;
}

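/*
 * Create one pending snapshot: COW and copy the source root node into a
 * new root item, then insert the directory item and inode ref that make
 * the snapshot visible in the top-level directory.
 */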
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        int ret;
        int namelen;
        u64 objectid;

        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                ret = -ENOMEM;
                goto fail;
        }
        ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
        if (ret)
                goto fail;

        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

        key.objectid = objectid;
        key.offset = 1;
        btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

        old = btrfs_lock_root_node(root);
        btrfs_cow_block(trans, root, old, NULL, 0, &old);

        btrfs_copy_root(trans, root, old, &tmp, objectid);
        btrfs_tree_unlock(old);
        free_extent_buffer(old);

        btrfs_set_root_bytenr(new_root_item, tmp->start);
        btrfs_set_root_level(new_root_item, btrfs_header_level(tmp));
        ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
                                new_root_item);
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        if (ret)
                goto fail;

        /*
         * insert the directory item
         */
        key.offset = (u64)-1;
        namelen = strlen(pending->name);
        ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
                                    pending->name, namelen,
                                    root->fs_info->sb->s_root->d_inode->i_ino,
                                    &key, BTRFS_FT_DIR);

        if (ret)
                goto fail;

        ret = btrfs_insert_inode_ref(trans, root->fs_info->tree_root,
                             pending->name, strlen(pending->name), objectid,
                             root->fs_info->sb->s_root->d_inode->i_ino);

        /* Invalidate existing dcache entry for new snapshot. */
        btrfs_invalidate_dcache_root(root, pending->name, namelen);

fail:
        kfree(new_root_item);
        return ret;
}

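/*
 * Create every snapshot queued on this transaction's pending_snapshots
 * list and free the pending entries.
 */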
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending;
        struct list_head *head = &trans->transaction->pending_snapshots;
        int ret;

        while(!list_empty(head)) {
                pending = list_entry(head->next,
                                     struct btrfs_pending_snapshot, list);
                ret = create_pending_snapshot(trans, fs_info, pending);
                BUG_ON(ret);
                list_del(&pending->list);
                kfree(pending->name);
                kfree(pending);
        }
        return 0;
}

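/*
 * Commit the running transaction: wait for any previous commit to
 * finish and for other writers to drain, create pending snapshots,
 * update the tree roots and super block, write everything out and wake
 * waiters.  The transaction is unblocked for new writers before the
 * final writeback is issued.
 */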
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root)
{
        unsigned long joined = 0;
        unsigned long timeout = 1;
        struct btrfs_transaction *cur_trans;
        struct btrfs_transaction *prev_trans = NULL;
        struct btrfs_root *chunk_root = root->fs_info->chunk_root;
        struct list_head dirty_fs_roots;
        struct extent_io_tree *pinned_copy;
        DEFINE_WAIT(wait);
        int ret;

        INIT_LIST_HEAD(&dirty_fs_roots);

        mutex_lock(&root->fs_info->trans_mutex);
        if (trans->transaction->in_commit) {
                cur_trans = trans->transaction;
                trans->transaction->use_count++;
                mutex_unlock(&root->fs_info->trans_mutex);
                btrfs_end_transaction(trans, root);

                ret = wait_for_commit(root, cur_trans);
                BUG_ON(ret);

                mutex_lock(&root->fs_info->trans_mutex);
                put_transaction(cur_trans);
                mutex_unlock(&root->fs_info->trans_mutex);

                return 0;
        }

        pinned_copy = kmalloc(sizeof(*pinned_copy), GFP_NOFS);
        if (!pinned_copy)
                return -ENOMEM;

        extent_io_tree_init(pinned_copy,
                             root->fs_info->btree_inode->i_mapping, GFP_NOFS);

        printk("commit trans %Lu\n", trans->transid);
        trans->transaction->in_commit = 1;
        trans->transaction->blocked = 1;
        cur_trans = trans->transaction;
        if (cur_trans->list.prev != &root->fs_info->trans_list) {
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (!prev_trans->commit_done) {
                        prev_trans->use_count++;
                        mutex_unlock(&root->fs_info->trans_mutex);

                        wait_for_commit(root, prev_trans);

                        mutex_lock(&root->fs_info->trans_mutex);
                        put_transaction(prev_trans);
                }
        }

        do {
                joined = cur_trans->num_joined;
                WARN_ON(cur_trans != trans->transaction);
                prepare_to_wait(&cur_trans->writer_wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                if (cur_trans->num_writers > 1)
                        timeout = MAX_SCHEDULE_TIMEOUT;
                else
                        timeout = 1;

                mutex_unlock(&root->fs_info->trans_mutex);

                schedule_timeout(timeout);

                mutex_lock(&root->fs_info->trans_mutex);
                finish_wait(&cur_trans->writer_wait, &wait);
        } while (cur_trans->num_writers > 1 ||
                 (cur_trans->num_joined != joined));

        ret = create_pending_snapshots(trans, root->fs_info);
        BUG_ON(ret);

        WARN_ON(cur_trans != trans->transaction);

        ret = add_dirty_roots(trans, &root->fs_info->fs_roots_radix,
                              &dirty_fs_roots);
        BUG_ON(ret);

        ret = btrfs_commit_tree_roots(trans, root);
        BUG_ON(ret);

        cur_trans = root->fs_info->running_transaction;
        spin_lock(&root->fs_info->new_trans_lock);
        root->fs_info->running_transaction = NULL;
        spin_unlock(&root->fs_info->new_trans_lock);
        btrfs_set_super_generation(&root->fs_info->super_copy,
                                   cur_trans->transid);
        btrfs_set_super_root(&root->fs_info->super_copy,
                             root->fs_info->tree_root->node->start);
        btrfs_set_super_root_level(&root->fs_info->super_copy,
                           btrfs_header_level(root->fs_info->tree_root->node));

        btrfs_set_super_chunk_root(&root->fs_info->super_copy,
                                   chunk_root->node->start);
        btrfs_set_super_chunk_root_level(&root->fs_info->super_copy,
                                         btrfs_header_level(chunk_root->node));
        memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
               sizeof(root->fs_info->super_copy));

        btrfs_copy_pinned(root, pinned_copy);

        trans->transaction->blocked = 0;
        wake_up(&root->fs_info->transaction_throttle);
        wake_up(&root->fs_info->transaction_wait);

        mutex_unlock(&root->fs_info->trans_mutex);
        ret = btrfs_write_and_wait_transaction(trans, root);
        BUG_ON(ret);
        write_ctree_super(trans, root);

        btrfs_finish_extent_commit(trans, root, pinned_copy);
        mutex_lock(&root->fs_info->trans_mutex);

        kfree(pinned_copy);

        cur_trans->commit_done = 1;
        root->fs_info->last_trans_committed = cur_trans->transid;
        wake_up(&cur_trans->commit_wait);
        put_transaction(cur_trans);
        put_transaction(cur_trans);

        if (root->fs_info->closing)
                list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);
        else
                list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);

        mutex_unlock(&root->fs_info->trans_mutex);
        printk("done commit trans %Lu\n", trans->transid);
        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        if (root->fs_info->closing) {
                drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
        }
        return ret;
}

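/*
 * Drop everything queued on fs_info->dead_roots, repeating until the
 * list stays empty.
 */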
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
        struct list_head dirty_roots;
        INIT_LIST_HEAD(&dirty_roots);
again:
        mutex_lock(&root->fs_info->trans_mutex);
        list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
        mutex_unlock(&root->fs_info->trans_mutex);

        if (!list_empty(&dirty_roots)) {
                drop_dirty_roots(root, &dirty_roots);
                goto again;
        }
        return 0;
}