]> Pileus Git - ~andy/linux/blob - fs/btrfs/extent_io.c
Merge branches 'acpi_pad', 'acpica', 'apei-bugzilla-43282', 'battery', 'cpuidle-coupl...
[~andy/linux] / fs / btrfs / extent_io.c
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/pagemap.h>
6 #include <linux/page-flags.h>
7 #include <linux/module.h>
8 #include <linux/spinlock.h>
9 #include <linux/blkdev.h>
10 #include <linux/swap.h>
11 #include <linux/writeback.h>
12 #include <linux/pagevec.h>
13 #include <linux/prefetch.h>
14 #include <linux/cleancache.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
17 #include "compat.h"
18 #include "ctree.h"
19 #include "btrfs_inode.h"
20 #include "volumes.h"
21 #include "check-integrity.h"
22 #include "locking.h"
23 #include "rcu-string.h"
24
25 static struct kmem_cache *extent_state_cache;
26 static struct kmem_cache *extent_buffer_cache;
27
28 static LIST_HEAD(buffers);
29 static LIST_HEAD(states);
30
31 #define LEAK_DEBUG 0
32 #if LEAK_DEBUG
33 static DEFINE_SPINLOCK(leak_lock);
34 #endif
35
36 #define BUFFER_LRU_MAX 64
37
38 struct tree_entry {
39         u64 start;
40         u64 end;
41         struct rb_node rb_node;
42 };
43
44 struct extent_page_data {
45         struct bio *bio;
46         struct extent_io_tree *tree;
47         get_extent_t *get_extent;
48
49         /* tells writepage not to lock the state bits for this range
50          * it still does the unlocking
51          */
52         unsigned int extent_locked:1;
53
54         /* tells the submit_bio code to use a WRITE_SYNC */
55         unsigned int sync_io:1;
56 };
57
58 static noinline void flush_write_bio(void *data);
59 static inline struct btrfs_fs_info *
60 tree_fs_info(struct extent_io_tree *tree)
61 {
62         return btrfs_sb(tree->mapping->host->i_sb);
63 }
64
65 int __init extent_io_init(void)
66 {
67         extent_state_cache = kmem_cache_create("extent_state",
68                         sizeof(struct extent_state), 0,
69                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
70         if (!extent_state_cache)
71                 return -ENOMEM;
72
73         extent_buffer_cache = kmem_cache_create("extent_buffers",
74                         sizeof(struct extent_buffer), 0,
75                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
76         if (!extent_buffer_cache)
77                 goto free_state_cache;
78         return 0;
79
80 free_state_cache:
81         kmem_cache_destroy(extent_state_cache);
82         return -ENOMEM;
83 }
84
85 void extent_io_exit(void)
86 {
87         struct extent_state *state;
88         struct extent_buffer *eb;
89
90         while (!list_empty(&states)) {
91                 state = list_entry(states.next, struct extent_state, leak_list);
92                 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
93                        "state %lu in tree %p refs %d\n",
94                        (unsigned long long)state->start,
95                        (unsigned long long)state->end,
96                        state->state, state->tree, atomic_read(&state->refs));
97                 list_del(&state->leak_list);
98                 kmem_cache_free(extent_state_cache, state);
99
100         }
101
102         while (!list_empty(&buffers)) {
103                 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
104                 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
105                        "refs %d\n", (unsigned long long)eb->start,
106                        eb->len, atomic_read(&eb->refs));
107                 list_del(&eb->leak_list);
108                 kmem_cache_free(extent_buffer_cache, eb);
109         }
110         if (extent_state_cache)
111                 kmem_cache_destroy(extent_state_cache);
112         if (extent_buffer_cache)
113                 kmem_cache_destroy(extent_buffer_cache);
114 }
115
116 void extent_io_tree_init(struct extent_io_tree *tree,
117                          struct address_space *mapping)
118 {
119         tree->state = RB_ROOT;
120         INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
121         tree->ops = NULL;
122         tree->dirty_bytes = 0;
123         spin_lock_init(&tree->lock);
124         spin_lock_init(&tree->buffer_lock);
125         tree->mapping = mapping;
126 }
127
128 static struct extent_state *alloc_extent_state(gfp_t mask)
129 {
130         struct extent_state *state;
131 #if LEAK_DEBUG
132         unsigned long flags;
133 #endif
134
135         state = kmem_cache_alloc(extent_state_cache, mask);
136         if (!state)
137                 return state;
138         state->state = 0;
139         state->private = 0;
140         state->tree = NULL;
141 #if LEAK_DEBUG
142         spin_lock_irqsave(&leak_lock, flags);
143         list_add(&state->leak_list, &states);
144         spin_unlock_irqrestore(&leak_lock, flags);
145 #endif
146         atomic_set(&state->refs, 1);
147         init_waitqueue_head(&state->wq);
148         trace_alloc_extent_state(state, mask, _RET_IP_);
149         return state;
150 }
151
152 void free_extent_state(struct extent_state *state)
153 {
154         if (!state)
155                 return;
156         if (atomic_dec_and_test(&state->refs)) {
157 #if LEAK_DEBUG
158                 unsigned long flags;
159 #endif
160                 WARN_ON(state->tree);
161 #if LEAK_DEBUG
162                 spin_lock_irqsave(&leak_lock, flags);
163                 list_del(&state->leak_list);
164                 spin_unlock_irqrestore(&leak_lock, flags);
165 #endif
166                 trace_free_extent_state(state, _RET_IP_);
167                 kmem_cache_free(extent_state_cache, state);
168         }
169 }
170
171 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
172                                    struct rb_node *node)
173 {
174         struct rb_node **p = &root->rb_node;
175         struct rb_node *parent = NULL;
176         struct tree_entry *entry;
177
178         while (*p) {
179                 parent = *p;
180                 entry = rb_entry(parent, struct tree_entry, rb_node);
181
182                 if (offset < entry->start)
183                         p = &(*p)->rb_left;
184                 else if (offset > entry->end)
185                         p = &(*p)->rb_right;
186                 else
187                         return parent;
188         }
189
190         rb_link_node(node, parent, p);
191         rb_insert_color(node, root);
192         return NULL;
193 }
194
195 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
196                                      struct rb_node **prev_ret,
197                                      struct rb_node **next_ret)
198 {
199         struct rb_root *root = &tree->state;
200         struct rb_node *n = root->rb_node;
201         struct rb_node *prev = NULL;
202         struct rb_node *orig_prev = NULL;
203         struct tree_entry *entry;
204         struct tree_entry *prev_entry = NULL;
205
206         while (n) {
207                 entry = rb_entry(n, struct tree_entry, rb_node);
208                 prev = n;
209                 prev_entry = entry;
210
211                 if (offset < entry->start)
212                         n = n->rb_left;
213                 else if (offset > entry->end)
214                         n = n->rb_right;
215                 else
216                         return n;
217         }
218
219         if (prev_ret) {
220                 orig_prev = prev;
221                 while (prev && offset > prev_entry->end) {
222                         prev = rb_next(prev);
223                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
224                 }
225                 *prev_ret = prev;
226                 prev = orig_prev;
227         }
228
229         if (next_ret) {
230                 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
231                 while (prev && offset < prev_entry->start) {
232                         prev = rb_prev(prev);
233                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
234                 }
235                 *next_ret = prev;
236         }
237         return NULL;
238 }
239
240 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
241                                           u64 offset)
242 {
243         struct rb_node *prev = NULL;
244         struct rb_node *ret;
245
246         ret = __etree_search(tree, offset, &prev, NULL);
247         if (!ret)
248                 return prev;
249         return ret;
250 }
251
252 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
253                      struct extent_state *other)
254 {
255         if (tree->ops && tree->ops->merge_extent_hook)
256                 tree->ops->merge_extent_hook(tree->mapping->host, new,
257                                              other);
258 }
259
260 /*
261  * utility function to look for merge candidates inside a given range.
262  * Any extents with matching state are merged together into a single
263  * extent in the tree.  Extents with EXTENT_IO in their state field
264  * are not merged because the end_io handlers need to be able to do
265  * operations on them without sleeping (or doing allocations/splits).
266  *
267  * This should be called with the tree lock held.
268  */
269 static void merge_state(struct extent_io_tree *tree,
270                         struct extent_state *state)
271 {
272         struct extent_state *other;
273         struct rb_node *other_node;
274
275         if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
276                 return;
277
278         other_node = rb_prev(&state->rb_node);
279         if (other_node) {
280                 other = rb_entry(other_node, struct extent_state, rb_node);
281                 if (other->end == state->start - 1 &&
282                     other->state == state->state) {
283                         merge_cb(tree, state, other);
284                         state->start = other->start;
285                         other->tree = NULL;
286                         rb_erase(&other->rb_node, &tree->state);
287                         free_extent_state(other);
288                 }
289         }
290         other_node = rb_next(&state->rb_node);
291         if (other_node) {
292                 other = rb_entry(other_node, struct extent_state, rb_node);
293                 if (other->start == state->end + 1 &&
294                     other->state == state->state) {
295                         merge_cb(tree, state, other);
296                         state->end = other->end;
297                         other->tree = NULL;
298                         rb_erase(&other->rb_node, &tree->state);
299                         free_extent_state(other);
300                 }
301         }
302 }
303
304 static void set_state_cb(struct extent_io_tree *tree,
305                          struct extent_state *state, int *bits)
306 {
307         if (tree->ops && tree->ops->set_bit_hook)
308                 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
309 }
310
311 static void clear_state_cb(struct extent_io_tree *tree,
312                            struct extent_state *state, int *bits)
313 {
314         if (tree->ops && tree->ops->clear_bit_hook)
315                 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
316 }
317
318 static void set_state_bits(struct extent_io_tree *tree,
319                            struct extent_state *state, int *bits);
320
321 /*
322  * insert an extent_state struct into the tree.  'bits' are set on the
323  * struct before it is inserted.
324  *
325  * This may return -EEXIST if the extent is already there, in which case the
326  * state struct is freed.
327  *
328  * The tree lock is not taken internally.  This is a utility function and
329  * probably isn't what you want to call (see set/clear_extent_bit).
330  */
331 static int insert_state(struct extent_io_tree *tree,
332                         struct extent_state *state, u64 start, u64 end,
333                         int *bits)
334 {
335         struct rb_node *node;
336
337         if (end < start) {
338                 printk(KERN_ERR "btrfs end < start %llu %llu\n",
339                        (unsigned long long)end,
340                        (unsigned long long)start);
341                 WARN_ON(1);
342         }
343         state->start = start;
344         state->end = end;
345
346         set_state_bits(tree, state, bits);
347
348         node = tree_insert(&tree->state, end, &state->rb_node);
349         if (node) {
350                 struct extent_state *found;
351                 found = rb_entry(node, struct extent_state, rb_node);
352                 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
353                        "%llu %llu\n", (unsigned long long)found->start,
354                        (unsigned long long)found->end,
355                        (unsigned long long)start, (unsigned long long)end);
356                 return -EEXIST;
357         }
358         state->tree = tree;
359         merge_state(tree, state);
360         return 0;
361 }
362
363 static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
364                      u64 split)
365 {
366         if (tree->ops && tree->ops->split_extent_hook)
367                 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
368 }
369
370 /*
371  * split a given extent state struct in two, inserting the preallocated
372  * struct 'prealloc' as the newly created second half.  'split' indicates an
373  * offset inside 'orig' where it should be split.
374  *
375  * Before calling,
376  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
377  * are two extent state structs in the tree:
378  * prealloc: [orig->start, split - 1]
379  * orig: [ split, orig->end ]
380  *
381  * The tree locks are not taken by this function. They need to be held
382  * by the caller.
383  */
384 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
385                        struct extent_state *prealloc, u64 split)
386 {
387         struct rb_node *node;
388
389         split_cb(tree, orig, split);
390
391         prealloc->start = orig->start;
392         prealloc->end = split - 1;
393         prealloc->state = orig->state;
394         orig->start = split;
395
396         node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
397         if (node) {
398                 free_extent_state(prealloc);
399                 return -EEXIST;
400         }
401         prealloc->tree = tree;
402         return 0;
403 }
404
405 static struct extent_state *next_state(struct extent_state *state)
406 {
407         struct rb_node *next = rb_next(&state->rb_node);
408         if (next)
409                 return rb_entry(next, struct extent_state, rb_node);
410         else
411                 return NULL;
412 }
413
414 /*
415  * utility function to clear some bits in an extent state struct.
416  * it will optionally wake up any one waiting on this state (wake == 1).
417  *
418  * If no bits are set on the state struct after clearing things, the
419  * struct is freed and removed from the tree
420  */
421 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
422                                             struct extent_state *state,
423                                             int *bits, int wake)
424 {
425         struct extent_state *next;
426         int bits_to_clear = *bits & ~EXTENT_CTLBITS;
427
428         if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
429                 u64 range = state->end - state->start + 1;
430                 WARN_ON(range > tree->dirty_bytes);
431                 tree->dirty_bytes -= range;
432         }
433         clear_state_cb(tree, state, bits);
434         state->state &= ~bits_to_clear;
435         if (wake)
436                 wake_up(&state->wq);
437         if (state->state == 0) {
438                 next = next_state(state);
439                 if (state->tree) {
440                         rb_erase(&state->rb_node, &tree->state);
441                         state->tree = NULL;
442                         free_extent_state(state);
443                 } else {
444                         WARN_ON(1);
445                 }
446         } else {
447                 merge_state(tree, state);
448                 next = next_state(state);
449         }
450         return next;
451 }
452
453 static struct extent_state *
454 alloc_extent_state_atomic(struct extent_state *prealloc)
455 {
456         if (!prealloc)
457                 prealloc = alloc_extent_state(GFP_ATOMIC);
458
459         return prealloc;
460 }
461
462 void extent_io_tree_panic(struct extent_io_tree *tree, int err)
463 {
464         btrfs_panic(tree_fs_info(tree), err, "Locking error: "
465                     "Extent tree was modified by another "
466                     "thread while locked.");
467 }
468
469 /*
470  * clear some bits on a range in the tree.  This may require splitting
471  * or inserting elements in the tree, so the gfp mask is used to
472  * indicate which allocations or sleeping are allowed.
473  *
474  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
475  * the given range from the tree regardless of state (ie for truncate).
476  *
477  * the range [start, end] is inclusive.
478  *
479  * This takes the tree lock, and returns 0 on success and < 0 on error.
480  */
481 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
482                      int bits, int wake, int delete,
483                      struct extent_state **cached_state,
484                      gfp_t mask)
485 {
486         struct extent_state *state;
487         struct extent_state *cached;
488         struct extent_state *prealloc = NULL;
489         struct rb_node *node;
490         u64 last_end;
491         int err;
492         int clear = 0;
493
494         if (delete)
495                 bits |= ~EXTENT_CTLBITS;
496         bits |= EXTENT_FIRST_DELALLOC;
497
498         if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
499                 clear = 1;
500 again:
501         if (!prealloc && (mask & __GFP_WAIT)) {
502                 prealloc = alloc_extent_state(mask);
503                 if (!prealloc)
504                         return -ENOMEM;
505         }
506
507         spin_lock(&tree->lock);
508         if (cached_state) {
509                 cached = *cached_state;
510
511                 if (clear) {
512                         *cached_state = NULL;
513                         cached_state = NULL;
514                 }
515
516                 if (cached && cached->tree && cached->start <= start &&
517                     cached->end > start) {
518                         if (clear)
519                                 atomic_dec(&cached->refs);
520                         state = cached;
521                         goto hit_next;
522                 }
523                 if (clear)
524                         free_extent_state(cached);
525         }
526         /*
527          * this search will find the extents that end after
528          * our range starts
529          */
530         node = tree_search(tree, start);
531         if (!node)
532                 goto out;
533         state = rb_entry(node, struct extent_state, rb_node);
534 hit_next:
535         if (state->start > end)
536                 goto out;
537         WARN_ON(state->end < start);
538         last_end = state->end;
539
540         /* the state doesn't have the wanted bits, go ahead */
541         if (!(state->state & bits)) {
542                 state = next_state(state);
543                 goto next;
544         }
545
546         /*
547          *     | ---- desired range ---- |
548          *  | state | or
549          *  | ------------- state -------------- |
550          *
551          * We need to split the extent we found, and may flip
552          * bits on second half.
553          *
554          * If the extent we found extends past our range, we
555          * just split and search again.  It'll get split again
556          * the next time though.
557          *
558          * If the extent we found is inside our range, we clear
559          * the desired bit on it.
560          */
561
562         if (state->start < start) {
563                 prealloc = alloc_extent_state_atomic(prealloc);
564                 BUG_ON(!prealloc);
565                 err = split_state(tree, state, prealloc, start);
566                 if (err)
567                         extent_io_tree_panic(tree, err);
568
569                 prealloc = NULL;
570                 if (err)
571                         goto out;
572                 if (state->end <= end) {
573                         state = clear_state_bit(tree, state, &bits, wake);
574                         goto next;
575                 }
576                 goto search_again;
577         }
578         /*
579          * | ---- desired range ---- |
580          *                        | state |
581          * We need to split the extent, and clear the bit
582          * on the first half
583          */
584         if (state->start <= end && state->end > end) {
585                 prealloc = alloc_extent_state_atomic(prealloc);
586                 BUG_ON(!prealloc);
587                 err = split_state(tree, state, prealloc, end + 1);
588                 if (err)
589                         extent_io_tree_panic(tree, err);
590
591                 if (wake)
592                         wake_up(&state->wq);
593
594                 clear_state_bit(tree, prealloc, &bits, wake);
595
596                 prealloc = NULL;
597                 goto out;
598         }
599
600         state = clear_state_bit(tree, state, &bits, wake);
601 next:
602         if (last_end == (u64)-1)
603                 goto out;
604         start = last_end + 1;
605         if (start <= end && state && !need_resched())
606                 goto hit_next;
607         goto search_again;
608
609 out:
610         spin_unlock(&tree->lock);
611         if (prealloc)
612                 free_extent_state(prealloc);
613
614         return 0;
615
616 search_again:
617         if (start > end)
618                 goto out;
619         spin_unlock(&tree->lock);
620         if (mask & __GFP_WAIT)
621                 cond_resched();
622         goto again;
623 }
624
625 static void wait_on_state(struct extent_io_tree *tree,
626                           struct extent_state *state)
627                 __releases(tree->lock)
628                 __acquires(tree->lock)
629 {
630         DEFINE_WAIT(wait);
631         prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
632         spin_unlock(&tree->lock);
633         schedule();
634         spin_lock(&tree->lock);
635         finish_wait(&state->wq, &wait);
636 }
637
638 /*
639  * waits for one or more bits to clear on a range in the state tree.
640  * The range [start, end] is inclusive.
641  * The tree lock is taken by this function
642  */
643 void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
644 {
645         struct extent_state *state;
646         struct rb_node *node;
647
648         spin_lock(&tree->lock);
649 again:
650         while (1) {
651                 /*
652                  * this search will find all the extents that end after
653                  * our range starts
654                  */
655                 node = tree_search(tree, start);
656                 if (!node)
657                         break;
658
659                 state = rb_entry(node, struct extent_state, rb_node);
660
661                 if (state->start > end)
662                         goto out;
663
664                 if (state->state & bits) {
665                         start = state->start;
666                         atomic_inc(&state->refs);
667                         wait_on_state(tree, state);
668                         free_extent_state(state);
669                         goto again;
670                 }
671                 start = state->end + 1;
672
673                 if (start > end)
674                         break;
675
676                 cond_resched_lock(&tree->lock);
677         }
678 out:
679         spin_unlock(&tree->lock);
680 }
681
682 static void set_state_bits(struct extent_io_tree *tree,
683                            struct extent_state *state,
684                            int *bits)
685 {
686         int bits_to_set = *bits & ~EXTENT_CTLBITS;
687
688         set_state_cb(tree, state, bits);
689         if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
690                 u64 range = state->end - state->start + 1;
691                 tree->dirty_bytes += range;
692         }
693         state->state |= bits_to_set;
694 }
695
696 static void cache_state(struct extent_state *state,
697                         struct extent_state **cached_ptr)
698 {
699         if (cached_ptr && !(*cached_ptr)) {
700                 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
701                         *cached_ptr = state;
702                         atomic_inc(&state->refs);
703                 }
704         }
705 }
706
707 static void uncache_state(struct extent_state **cached_ptr)
708 {
709         if (cached_ptr && (*cached_ptr)) {
710                 struct extent_state *state = *cached_ptr;
711                 *cached_ptr = NULL;
712                 free_extent_state(state);
713         }
714 }
715
716 /*
717  * set some bits on a range in the tree.  This may require allocations or
718  * sleeping, so the gfp mask is used to indicate what is allowed.
719  *
720  * If any of the exclusive bits are set, this will fail with -EEXIST if some
721  * part of the range already has the desired bits set.  The start of the
722  * existing range is returned in failed_start in this case.
723  *
724  * [start, end] is inclusive This takes the tree lock.
725  */
726
727 static int __must_check
728 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
729                  int bits, int exclusive_bits, u64 *failed_start,
730                  struct extent_state **cached_state, gfp_t mask)
731 {
732         struct extent_state *state;
733         struct extent_state *prealloc = NULL;
734         struct rb_node *node;
735         int err = 0;
736         u64 last_start;
737         u64 last_end;
738
739         bits |= EXTENT_FIRST_DELALLOC;
740 again:
741         if (!prealloc && (mask & __GFP_WAIT)) {
742                 prealloc = alloc_extent_state(mask);
743                 BUG_ON(!prealloc);
744         }
745
746         spin_lock(&tree->lock);
747         if (cached_state && *cached_state) {
748                 state = *cached_state;
749                 if (state->start <= start && state->end > start &&
750                     state->tree) {
751                         node = &state->rb_node;
752                         goto hit_next;
753                 }
754         }
755         /*
756          * this search will find all the extents that end after
757          * our range starts.
758          */
759         node = tree_search(tree, start);
760         if (!node) {
761                 prealloc = alloc_extent_state_atomic(prealloc);
762                 BUG_ON(!prealloc);
763                 err = insert_state(tree, prealloc, start, end, &bits);
764                 if (err)
765                         extent_io_tree_panic(tree, err);
766
767                 prealloc = NULL;
768                 goto out;
769         }
770         state = rb_entry(node, struct extent_state, rb_node);
771 hit_next:
772         last_start = state->start;
773         last_end = state->end;
774
775         /*
776          * | ---- desired range ---- |
777          * | state |
778          *
779          * Just lock what we found and keep going
780          */
781         if (state->start == start && state->end <= end) {
782                 if (state->state & exclusive_bits) {
783                         *failed_start = state->start;
784                         err = -EEXIST;
785                         goto out;
786                 }
787
788                 set_state_bits(tree, state, &bits);
789                 cache_state(state, cached_state);
790                 merge_state(tree, state);
791                 if (last_end == (u64)-1)
792                         goto out;
793                 start = last_end + 1;
794                 state = next_state(state);
795                 if (start < end && state && state->start == start &&
796                     !need_resched())
797                         goto hit_next;
798                 goto search_again;
799         }
800
801         /*
802          *     | ---- desired range ---- |
803          * | state |
804          *   or
805          * | ------------- state -------------- |
806          *
807          * We need to split the extent we found, and may flip bits on
808          * second half.
809          *
810          * If the extent we found extends past our
811          * range, we just split and search again.  It'll get split
812          * again the next time though.
813          *
814          * If the extent we found is inside our range, we set the
815          * desired bit on it.
816          */
817         if (state->start < start) {
818                 if (state->state & exclusive_bits) {
819                         *failed_start = start;
820                         err = -EEXIST;
821                         goto out;
822                 }
823
824                 prealloc = alloc_extent_state_atomic(prealloc);
825                 BUG_ON(!prealloc);
826                 err = split_state(tree, state, prealloc, start);
827                 if (err)
828                         extent_io_tree_panic(tree, err);
829
830                 prealloc = NULL;
831                 if (err)
832                         goto out;
833                 if (state->end <= end) {
834                         set_state_bits(tree, state, &bits);
835                         cache_state(state, cached_state);
836                         merge_state(tree, state);
837                         if (last_end == (u64)-1)
838                                 goto out;
839                         start = last_end + 1;
840                         state = next_state(state);
841                         if (start < end && state && state->start == start &&
842                             !need_resched())
843                                 goto hit_next;
844                 }
845                 goto search_again;
846         }
847         /*
848          * | ---- desired range ---- |
849          *     | state | or               | state |
850          *
851          * There's a hole, we need to insert something in it and
852          * ignore the extent we found.
853          */
854         if (state->start > start) {
855                 u64 this_end;
856                 if (end < last_start)
857                         this_end = end;
858                 else
859                         this_end = last_start - 1;
860
861                 prealloc = alloc_extent_state_atomic(prealloc);
862                 BUG_ON(!prealloc);
863
864                 /*
865                  * Avoid to free 'prealloc' if it can be merged with
866                  * the later extent.
867                  */
868                 err = insert_state(tree, prealloc, start, this_end,
869                                    &bits);
870                 if (err)
871                         extent_io_tree_panic(tree, err);
872
873                 cache_state(prealloc, cached_state);
874                 prealloc = NULL;
875                 start = this_end + 1;
876                 goto search_again;
877         }
878         /*
879          * | ---- desired range ---- |
880          *                        | state |
881          * We need to split the extent, and set the bit
882          * on the first half
883          */
884         if (state->start <= end && state->end > end) {
885                 if (state->state & exclusive_bits) {
886                         *failed_start = start;
887                         err = -EEXIST;
888                         goto out;
889                 }
890
891                 prealloc = alloc_extent_state_atomic(prealloc);
892                 BUG_ON(!prealloc);
893                 err = split_state(tree, state, prealloc, end + 1);
894                 if (err)
895                         extent_io_tree_panic(tree, err);
896
897                 set_state_bits(tree, prealloc, &bits);
898                 cache_state(prealloc, cached_state);
899                 merge_state(tree, prealloc);
900                 prealloc = NULL;
901                 goto out;
902         }
903
904         goto search_again;
905
906 out:
907         spin_unlock(&tree->lock);
908         if (prealloc)
909                 free_extent_state(prealloc);
910
911         return err;
912
913 search_again:
914         if (start > end)
915                 goto out;
916         spin_unlock(&tree->lock);
917         if (mask & __GFP_WAIT)
918                 cond_resched();
919         goto again;
920 }
921
922 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
923                    u64 *failed_start, struct extent_state **cached_state,
924                    gfp_t mask)
925 {
926         return __set_extent_bit(tree, start, end, bits, 0, failed_start,
927                                 cached_state, mask);
928 }
929
930
931 /**
932  * convert_extent - convert all bits in a given range from one bit to another
933  * @tree:       the io tree to search
934  * @start:      the start offset in bytes
935  * @end:        the end offset in bytes (inclusive)
936  * @bits:       the bits to set in this range
937  * @clear_bits: the bits to clear in this range
938  * @mask:       the allocation mask
939  *
940  * This will go through and set bits for the given range.  If any states exist
941  * already in this range they are set with the given bit and cleared of the
942  * clear_bits.  This is only meant to be used by things that are mergeable, ie
943  * converting from say DELALLOC to DIRTY.  This is not meant to be used with
944  * boundary bits like LOCK.
945  */
946 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
947                        int bits, int clear_bits, gfp_t mask)
948 {
949         struct extent_state *state;
950         struct extent_state *prealloc = NULL;
951         struct rb_node *node;
952         int err = 0;
953         u64 last_start;
954         u64 last_end;
955
956 again:
957         if (!prealloc && (mask & __GFP_WAIT)) {
958                 prealloc = alloc_extent_state(mask);
959                 if (!prealloc)
960                         return -ENOMEM;
961         }
962
963         spin_lock(&tree->lock);
964         /*
965          * this search will find all the extents that end after
966          * our range starts.
967          */
968         node = tree_search(tree, start);
969         if (!node) {
970                 prealloc = alloc_extent_state_atomic(prealloc);
971                 if (!prealloc) {
972                         err = -ENOMEM;
973                         goto out;
974                 }
975                 err = insert_state(tree, prealloc, start, end, &bits);
976                 prealloc = NULL;
977                 if (err)
978                         extent_io_tree_panic(tree, err);
979                 goto out;
980         }
981         state = rb_entry(node, struct extent_state, rb_node);
982 hit_next:
983         last_start = state->start;
984         last_end = state->end;
985
986         /*
987          * | ---- desired range ---- |
988          * | state |
989          *
990          * Just lock what we found and keep going
991          */
992         if (state->start == start && state->end <= end) {
993                 set_state_bits(tree, state, &bits);
994                 state = clear_state_bit(tree, state, &clear_bits, 0);
995                 if (last_end == (u64)-1)
996                         goto out;
997                 start = last_end + 1;
998                 if (start < end && state && state->start == start &&
999                     !need_resched())
1000                         goto hit_next;
1001                 goto search_again;
1002         }
1003
1004         /*
1005          *     | ---- desired range ---- |
1006          * | state |
1007          *   or
1008          * | ------------- state -------------- |
1009          *
1010          * We need to split the extent we found, and may flip bits on
1011          * second half.
1012          *
1013          * If the extent we found extends past our
1014          * range, we just split and search again.  It'll get split
1015          * again the next time though.
1016          *
1017          * If the extent we found is inside our range, we set the
1018          * desired bit on it.
1019          */
1020         if (state->start < start) {
1021                 prealloc = alloc_extent_state_atomic(prealloc);
1022                 if (!prealloc) {
1023                         err = -ENOMEM;
1024                         goto out;
1025                 }
1026                 err = split_state(tree, state, prealloc, start);
1027                 if (err)
1028                         extent_io_tree_panic(tree, err);
1029                 prealloc = NULL;
1030                 if (err)
1031                         goto out;
1032                 if (state->end <= end) {
1033                         set_state_bits(tree, state, &bits);
1034                         state = clear_state_bit(tree, state, &clear_bits, 0);
1035                         if (last_end == (u64)-1)
1036                                 goto out;
1037                         start = last_end + 1;
1038                         if (start < end && state && state->start == start &&
1039                             !need_resched())
1040                                 goto hit_next;
1041                 }
1042                 goto search_again;
1043         }
1044         /*
1045          * | ---- desired range ---- |
1046          *     | state | or               | state |
1047          *
1048          * There's a hole, we need to insert something in it and
1049          * ignore the extent we found.
1050          */
1051         if (state->start > start) {
1052                 u64 this_end;
1053                 if (end < last_start)
1054                         this_end = end;
1055                 else
1056                         this_end = last_start - 1;
1057
1058                 prealloc = alloc_extent_state_atomic(prealloc);
1059                 if (!prealloc) {
1060                         err = -ENOMEM;
1061                         goto out;
1062                 }
1063
1064                 /*
1065                  * Avoid to free 'prealloc' if it can be merged with
1066                  * the later extent.
1067                  */
1068                 err = insert_state(tree, prealloc, start, this_end,
1069                                    &bits);
1070                 if (err)
1071                         extent_io_tree_panic(tree, err);
1072                 prealloc = NULL;
1073                 start = this_end + 1;
1074                 goto search_again;
1075         }
1076         /*
1077          * | ---- desired range ---- |
1078          *                        | state |
1079          * We need to split the extent, and set the bit
1080          * on the first half
1081          */
1082         if (state->start <= end && state->end > end) {
1083                 prealloc = alloc_extent_state_atomic(prealloc);
1084                 if (!prealloc) {
1085                         err = -ENOMEM;
1086                         goto out;
1087                 }
1088
1089                 err = split_state(tree, state, prealloc, end + 1);
1090                 if (err)
1091                         extent_io_tree_panic(tree, err);
1092
1093                 set_state_bits(tree, prealloc, &bits);
1094                 clear_state_bit(tree, prealloc, &clear_bits, 0);
1095                 prealloc = NULL;
1096                 goto out;
1097         }
1098
1099         goto search_again;
1100
1101 out:
1102         spin_unlock(&tree->lock);
1103         if (prealloc)
1104                 free_extent_state(prealloc);
1105
1106         return err;
1107
1108 search_again:
1109         if (start > end)
1110                 goto out;
1111         spin_unlock(&tree->lock);
1112         if (mask & __GFP_WAIT)
1113                 cond_resched();
1114         goto again;
1115 }
1116
1117 /* wrappers around set/clear extent bit */
1118 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1119                      gfp_t mask)
1120 {
1121         return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
1122                               NULL, mask);
1123 }
1124
1125 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1126                     int bits, gfp_t mask)
1127 {
1128         return set_extent_bit(tree, start, end, bits, NULL,
1129                               NULL, mask);
1130 }
1131
1132 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1133                       int bits, gfp_t mask)
1134 {
1135         return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
1136 }
1137
1138 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
1139                         struct extent_state **cached_state, gfp_t mask)
1140 {
1141         return set_extent_bit(tree, start, end,
1142                               EXTENT_DELALLOC | EXTENT_UPTODATE,
1143                               NULL, cached_state, mask);
1144 }
1145
1146 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1147                        gfp_t mask)
1148 {
1149         return clear_extent_bit(tree, start, end,
1150                                 EXTENT_DIRTY | EXTENT_DELALLOC |
1151                                 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
1152 }
1153
1154 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1155                      gfp_t mask)
1156 {
1157         return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
1158                               NULL, mask);
1159 }
1160
1161 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1162                         struct extent_state **cached_state, gfp_t mask)
1163 {
1164         return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
1165                               cached_state, mask);
1166 }
1167
1168 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1169                           struct extent_state **cached_state, gfp_t mask)
1170 {
1171         return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
1172                                 cached_state, mask);
1173 }
1174
1175 /*
1176  * either insert or lock state struct between start and end use mask to tell
1177  * us if waiting is desired.
1178  */
1179 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1180                      int bits, struct extent_state **cached_state)
1181 {
1182         int err;
1183         u64 failed_start;
1184         while (1) {
1185                 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1186                                        EXTENT_LOCKED, &failed_start,
1187                                        cached_state, GFP_NOFS);
1188                 if (err == -EEXIST) {
1189                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1190                         start = failed_start;
1191                 } else
1192                         break;
1193                 WARN_ON(start > end);
1194         }
1195         return err;
1196 }
1197
1198 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1199 {
1200         return lock_extent_bits(tree, start, end, 0, NULL);
1201 }
1202
1203 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1204 {
1205         int err;
1206         u64 failed_start;
1207
1208         err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1209                                &failed_start, NULL, GFP_NOFS);
1210         if (err == -EEXIST) {
1211                 if (failed_start > start)
1212                         clear_extent_bit(tree, start, failed_start - 1,
1213                                          EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1214                 return 0;
1215         }
1216         return 1;
1217 }
1218
1219 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1220                          struct extent_state **cached, gfp_t mask)
1221 {
1222         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1223                                 mask);
1224 }
1225
1226 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1227 {
1228         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1229                                 GFP_NOFS);
1230 }
1231
1232 /*
1233  * helper function to set both pages and extents in the tree writeback
1234  */
1235 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1236 {
1237         unsigned long index = start >> PAGE_CACHE_SHIFT;
1238         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1239         struct page *page;
1240
1241         while (index <= end_index) {
1242                 page = find_get_page(tree->mapping, index);
1243                 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1244                 set_page_writeback(page);
1245                 page_cache_release(page);
1246                 index++;
1247         }
1248         return 0;
1249 }
1250
1251 /* find the first state struct with 'bits' set after 'start', and
1252  * return it.  tree->lock must be held.  NULL will returned if
1253  * nothing was found after 'start'
1254  */
1255 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1256                                                  u64 start, int bits)
1257 {
1258         struct rb_node *node;
1259         struct extent_state *state;
1260
1261         /*
1262          * this search will find all the extents that end after
1263          * our range starts.
1264          */
1265         node = tree_search(tree, start);
1266         if (!node)
1267                 goto out;
1268
1269         while (1) {
1270                 state = rb_entry(node, struct extent_state, rb_node);
1271                 if (state->end >= start && (state->state & bits))
1272                         return state;
1273
1274                 node = rb_next(node);
1275                 if (!node)
1276                         break;
1277         }
1278 out:
1279         return NULL;
1280 }
1281
1282 /*
1283  * find the first offset in the io tree with 'bits' set. zero is
1284  * returned if we find something, and *start_ret and *end_ret are
1285  * set to reflect the state struct that was found.
1286  *
1287  * If nothing was found, 1 is returned. If found something, return 0.
1288  */
1289 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1290                           u64 *start_ret, u64 *end_ret, int bits)
1291 {
1292         struct extent_state *state;
1293         int ret = 1;
1294
1295         spin_lock(&tree->lock);
1296         state = find_first_extent_bit_state(tree, start, bits);
1297         if (state) {
1298                 *start_ret = state->start;
1299                 *end_ret = state->end;
1300                 ret = 0;
1301         }
1302         spin_unlock(&tree->lock);
1303         return ret;
1304 }
1305
1306 /*
1307  * find a contiguous range of bytes in the file marked as delalloc, not
1308  * more than 'max_bytes'.  start and end are used to return the range,
1309  *
1310  * 1 is returned if we find something, 0 if nothing was in the tree
1311  */
1312 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1313                                         u64 *start, u64 *end, u64 max_bytes,
1314                                         struct extent_state **cached_state)
1315 {
1316         struct rb_node *node;
1317         struct extent_state *state;
1318         u64 cur_start = *start;
1319         u64 found = 0;
1320         u64 total_bytes = 0;
1321
1322         spin_lock(&tree->lock);
1323
1324         /*
1325          * this search will find all the extents that end after
1326          * our range starts.
1327          */
1328         node = tree_search(tree, cur_start);
1329         if (!node) {
1330                 if (!found)
1331                         *end = (u64)-1;
1332                 goto out;
1333         }
1334
1335         while (1) {
1336                 state = rb_entry(node, struct extent_state, rb_node);
1337                 if (found && (state->start != cur_start ||
1338                               (state->state & EXTENT_BOUNDARY))) {
1339                         goto out;
1340                 }
1341                 if (!(state->state & EXTENT_DELALLOC)) {
1342                         if (!found)
1343                                 *end = state->end;
1344                         goto out;
1345                 }
1346                 if (!found) {
1347                         *start = state->start;
1348                         *cached_state = state;
1349                         atomic_inc(&state->refs);
1350                 }
1351                 found++;
1352                 *end = state->end;
1353                 cur_start = state->end + 1;
1354                 node = rb_next(node);
1355                 if (!node)
1356                         break;
1357                 total_bytes += state->end - state->start + 1;
1358                 if (total_bytes >= max_bytes)
1359                         break;
1360         }
1361 out:
1362         spin_unlock(&tree->lock);
1363         return found;
1364 }
1365
1366 static noinline void __unlock_for_delalloc(struct inode *inode,
1367                                            struct page *locked_page,
1368                                            u64 start, u64 end)
1369 {
1370         int ret;
1371         struct page *pages[16];
1372         unsigned long index = start >> PAGE_CACHE_SHIFT;
1373         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1374         unsigned long nr_pages = end_index - index + 1;
1375         int i;
1376
1377         if (index == locked_page->index && end_index == index)
1378                 return;
1379
1380         while (nr_pages > 0) {
1381                 ret = find_get_pages_contig(inode->i_mapping, index,
1382                                      min_t(unsigned long, nr_pages,
1383                                      ARRAY_SIZE(pages)), pages);
1384                 for (i = 0; i < ret; i++) {
1385                         if (pages[i] != locked_page)
1386                                 unlock_page(pages[i]);
1387                         page_cache_release(pages[i]);
1388                 }
1389                 nr_pages -= ret;
1390                 index += ret;
1391                 cond_resched();
1392         }
1393 }
1394
1395 static noinline int lock_delalloc_pages(struct inode *inode,
1396                                         struct page *locked_page,
1397                                         u64 delalloc_start,
1398                                         u64 delalloc_end)
1399 {
1400         unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1401         unsigned long start_index = index;
1402         unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1403         unsigned long pages_locked = 0;
1404         struct page *pages[16];
1405         unsigned long nrpages;
1406         int ret;
1407         int i;
1408
1409         /* the caller is responsible for locking the start index */
1410         if (index == locked_page->index && index == end_index)
1411                 return 0;
1412
1413         /* skip the page at the start index */
1414         nrpages = end_index - index + 1;
1415         while (nrpages > 0) {
1416                 ret = find_get_pages_contig(inode->i_mapping, index,
1417                                      min_t(unsigned long,
1418                                      nrpages, ARRAY_SIZE(pages)), pages);
1419                 if (ret == 0) {
1420                         ret = -EAGAIN;
1421                         goto done;
1422                 }
1423                 /* now we have an array of pages, lock them all */
1424                 for (i = 0; i < ret; i++) {
1425                         /*
1426                          * the caller is taking responsibility for
1427                          * locked_page
1428                          */
1429                         if (pages[i] != locked_page) {
1430                                 lock_page(pages[i]);
1431                                 if (!PageDirty(pages[i]) ||
1432                                     pages[i]->mapping != inode->i_mapping) {
1433                                         ret = -EAGAIN;
1434                                         unlock_page(pages[i]);
1435                                         page_cache_release(pages[i]);
1436                                         goto done;
1437                                 }
1438                         }
1439                         page_cache_release(pages[i]);
1440                         pages_locked++;
1441                 }
1442                 nrpages -= ret;
1443                 index += ret;
1444                 cond_resched();
1445         }
1446         ret = 0;
1447 done:
1448         if (ret && pages_locked) {
1449                 __unlock_for_delalloc(inode, locked_page,
1450                               delalloc_start,
1451                               ((u64)(start_index + pages_locked - 1)) <<
1452                               PAGE_CACHE_SHIFT);
1453         }
1454         return ret;
1455 }
1456
1457 /*
1458  * find a contiguous range of bytes in the file marked as delalloc, not
1459  * more than 'max_bytes'.  start and end are used to return the range,
1460  *
1461  * 1 is returned if we find something, 0 if nothing was in the tree
1462  */
1463 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1464                                              struct extent_io_tree *tree,
1465                                              struct page *locked_page,
1466                                              u64 *start, u64 *end,
1467                                              u64 max_bytes)
1468 {
1469         u64 delalloc_start;
1470         u64 delalloc_end;
1471         u64 found;
1472         struct extent_state *cached_state = NULL;
1473         int ret;
1474         int loops = 0;
1475
1476 again:
1477         /* step one, find a bunch of delalloc bytes starting at start */
1478         delalloc_start = *start;
1479         delalloc_end = 0;
1480         found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1481                                     max_bytes, &cached_state);
1482         if (!found || delalloc_end <= *start) {
1483                 *start = delalloc_start;
1484                 *end = delalloc_end;
1485                 free_extent_state(cached_state);
1486                 return found;
1487         }
1488
1489         /*
1490          * start comes from the offset of locked_page.  We have to lock
1491          * pages in order, so we can't process delalloc bytes before
1492          * locked_page
1493          */
1494         if (delalloc_start < *start)
1495                 delalloc_start = *start;
1496
1497         /*
1498          * make sure to limit the number of pages we try to lock down
1499          * if we're looping.
1500          */
1501         if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1502                 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1503
1504         /* step two, lock all the pages after the page that has start */
1505         ret = lock_delalloc_pages(inode, locked_page,
1506                                   delalloc_start, delalloc_end);
1507         if (ret == -EAGAIN) {
1508                 /* some of the pages are gone, lets avoid looping by
1509                  * shortening the size of the delalloc range we're searching
1510                  */
1511                 free_extent_state(cached_state);
1512                 if (!loops) {
1513                         unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1514                         max_bytes = PAGE_CACHE_SIZE - offset;
1515                         loops = 1;
1516                         goto again;
1517                 } else {
1518                         found = 0;
1519                         goto out_failed;
1520                 }
1521         }
1522         BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1523
1524         /* step three, lock the state bits for the whole range */
1525         lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
1526
1527         /* then test to make sure it is all still delalloc */
1528         ret = test_range_bit(tree, delalloc_start, delalloc_end,
1529                              EXTENT_DELALLOC, 1, cached_state);
1530         if (!ret) {
1531                 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1532                                      &cached_state, GFP_NOFS);
1533                 __unlock_for_delalloc(inode, locked_page,
1534                               delalloc_start, delalloc_end);
1535                 cond_resched();
1536                 goto again;
1537         }
1538         free_extent_state(cached_state);
1539         *start = delalloc_start;
1540         *end = delalloc_end;
1541 out_failed:
1542         return found;
1543 }
1544
1545 int extent_clear_unlock_delalloc(struct inode *inode,
1546                                 struct extent_io_tree *tree,
1547                                 u64 start, u64 end, struct page *locked_page,
1548                                 unsigned long op)
1549 {
1550         int ret;
1551         struct page *pages[16];
1552         unsigned long index = start >> PAGE_CACHE_SHIFT;
1553         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1554         unsigned long nr_pages = end_index - index + 1;
1555         int i;
1556         int clear_bits = 0;
1557
1558         if (op & EXTENT_CLEAR_UNLOCK)
1559                 clear_bits |= EXTENT_LOCKED;
1560         if (op & EXTENT_CLEAR_DIRTY)
1561                 clear_bits |= EXTENT_DIRTY;
1562
1563         if (op & EXTENT_CLEAR_DELALLOC)
1564                 clear_bits |= EXTENT_DELALLOC;
1565
1566         clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1567         if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1568                     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1569                     EXTENT_SET_PRIVATE2)))
1570                 return 0;
1571
1572         while (nr_pages > 0) {
1573                 ret = find_get_pages_contig(inode->i_mapping, index,
1574                                      min_t(unsigned long,
1575                                      nr_pages, ARRAY_SIZE(pages)), pages);
1576                 for (i = 0; i < ret; i++) {
1577
1578                         if (op & EXTENT_SET_PRIVATE2)
1579                                 SetPagePrivate2(pages[i]);
1580
1581                         if (pages[i] == locked_page) {
1582                                 page_cache_release(pages[i]);
1583                                 continue;
1584                         }
1585                         if (op & EXTENT_CLEAR_DIRTY)
1586                                 clear_page_dirty_for_io(pages[i]);
1587                         if (op & EXTENT_SET_WRITEBACK)
1588                                 set_page_writeback(pages[i]);
1589                         if (op & EXTENT_END_WRITEBACK)
1590                                 end_page_writeback(pages[i]);
1591                         if (op & EXTENT_CLEAR_UNLOCK_PAGE)
1592                                 unlock_page(pages[i]);
1593                         page_cache_release(pages[i]);
1594                 }
1595                 nr_pages -= ret;
1596                 index += ret;
1597                 cond_resched();
1598         }
1599         return 0;
1600 }
1601
1602 /*
1603  * count the number of bytes in the tree that have a given bit(s)
1604  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1605  * cached.  The total number found is returned.
1606  */
1607 u64 count_range_bits(struct extent_io_tree *tree,
1608                      u64 *start, u64 search_end, u64 max_bytes,
1609                      unsigned long bits, int contig)
1610 {
1611         struct rb_node *node;
1612         struct extent_state *state;
1613         u64 cur_start = *start;
1614         u64 total_bytes = 0;
1615         u64 last = 0;
1616         int found = 0;
1617
1618         if (search_end <= cur_start) {
1619                 WARN_ON(1);
1620                 return 0;
1621         }
1622
1623         spin_lock(&tree->lock);
1624         if (cur_start == 0 && bits == EXTENT_DIRTY) {
1625                 total_bytes = tree->dirty_bytes;
1626                 goto out;
1627         }
1628         /*
1629          * this search will find all the extents that end after
1630          * our range starts.
1631          */
1632         node = tree_search(tree, cur_start);
1633         if (!node)
1634                 goto out;
1635
1636         while (1) {
1637                 state = rb_entry(node, struct extent_state, rb_node);
1638                 if (state->start > search_end)
1639                         break;
1640                 if (contig && found && state->start > last + 1)
1641                         break;
1642                 if (state->end >= cur_start && (state->state & bits) == bits) {
1643                         total_bytes += min(search_end, state->end) + 1 -
1644                                        max(cur_start, state->start);
1645                         if (total_bytes >= max_bytes)
1646                                 break;
1647                         if (!found) {
1648                                 *start = max(cur_start, state->start);
1649                                 found = 1;
1650                         }
1651                         last = state->end;
1652                 } else if (contig && found) {
1653                         break;
1654                 }
1655                 node = rb_next(node);
1656                 if (!node)
1657                         break;
1658         }
1659 out:
1660         spin_unlock(&tree->lock);
1661         return total_bytes;
1662 }
1663
1664 /*
1665  * set the private field for a given byte offset in the tree.  If there isn't
1666  * an extent_state there already, this does nothing.
1667  */
1668 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1669 {
1670         struct rb_node *node;
1671         struct extent_state *state;
1672         int ret = 0;
1673
1674         spin_lock(&tree->lock);
1675         /*
1676          * this search will find all the extents that end after
1677          * our range starts.
1678          */
1679         node = tree_search(tree, start);
1680         if (!node) {
1681                 ret = -ENOENT;
1682                 goto out;
1683         }
1684         state = rb_entry(node, struct extent_state, rb_node);
1685         if (state->start != start) {
1686                 ret = -ENOENT;
1687                 goto out;
1688         }
1689         state->private = private;
1690 out:
1691         spin_unlock(&tree->lock);
1692         return ret;
1693 }
1694
1695 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1696 {
1697         struct rb_node *node;
1698         struct extent_state *state;
1699         int ret = 0;
1700
1701         spin_lock(&tree->lock);
1702         /*
1703          * this search will find all the extents that end after
1704          * our range starts.
1705          */
1706         node = tree_search(tree, start);
1707         if (!node) {
1708                 ret = -ENOENT;
1709                 goto out;
1710         }
1711         state = rb_entry(node, struct extent_state, rb_node);
1712         if (state->start != start) {
1713                 ret = -ENOENT;
1714                 goto out;
1715         }
1716         *private = state->private;
1717 out:
1718         spin_unlock(&tree->lock);
1719         return ret;
1720 }
1721
1722 /*
1723  * searches a range in the state tree for a given mask.
1724  * If 'filled' == 1, this returns 1 only if every extent in the tree
1725  * has the bits set.  Otherwise, 1 is returned if any bit in the
1726  * range is found set.
1727  */
1728 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1729                    int bits, int filled, struct extent_state *cached)
1730 {
1731         struct extent_state *state = NULL;
1732         struct rb_node *node;
1733         int bitset = 0;
1734
1735         spin_lock(&tree->lock);
1736         if (cached && cached->tree && cached->start <= start &&
1737             cached->end > start)
1738                 node = &cached->rb_node;
1739         else
1740                 node = tree_search(tree, start);
1741         while (node && start <= end) {
1742                 state = rb_entry(node, struct extent_state, rb_node);
1743
1744                 if (filled && state->start > start) {
1745                         bitset = 0;
1746                         break;
1747                 }
1748
1749                 if (state->start > end)
1750                         break;
1751
1752                 if (state->state & bits) {
1753                         bitset = 1;
1754                         if (!filled)
1755                                 break;
1756                 } else if (filled) {
1757                         bitset = 0;
1758                         break;
1759                 }
1760
1761                 if (state->end == (u64)-1)
1762                         break;
1763
1764                 start = state->end + 1;
1765                 if (start > end)
1766                         break;
1767                 node = rb_next(node);
1768                 if (!node) {
1769                         if (filled)
1770                                 bitset = 0;
1771                         break;
1772                 }
1773         }
1774         spin_unlock(&tree->lock);
1775         return bitset;
1776 }
1777
1778 /*
1779  * helper function to set a given page up to date if all the
1780  * extents in the tree for that page are up to date
1781  */
1782 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1783 {
1784         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1785         u64 end = start + PAGE_CACHE_SIZE - 1;
1786         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1787                 SetPageUptodate(page);
1788 }
1789
1790 /*
1791  * helper function to unlock a page if all the extents in the tree
1792  * for that page are unlocked
1793  */
1794 static void check_page_locked(struct extent_io_tree *tree, struct page *page)
1795 {
1796         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1797         u64 end = start + PAGE_CACHE_SIZE - 1;
1798         if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1799                 unlock_page(page);
1800 }
1801
1802 /*
1803  * helper function to end page writeback if all the extents
1804  * in the tree for that page are done with writeback
1805  */
1806 static void check_page_writeback(struct extent_io_tree *tree,
1807                                  struct page *page)
1808 {
1809         end_page_writeback(page);
1810 }
1811
1812 /*
1813  * When IO fails, either with EIO or csum verification fails, we
1814  * try other mirrors that might have a good copy of the data.  This
1815  * io_failure_record is used to record state as we go through all the
1816  * mirrors.  If another mirror has good data, the page is set up to date
1817  * and things continue.  If a good mirror can't be found, the original
1818  * bio end_io callback is called to indicate things have failed.
1819  */
1820 struct io_failure_record {
1821         struct page *page;
1822         u64 start;
1823         u64 len;
1824         u64 logical;
1825         unsigned long bio_flags;
1826         int this_mirror;
1827         int failed_mirror;
1828         int in_validation;
1829 };
1830
1831 static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1832                                 int did_repair)
1833 {
1834         int ret;
1835         int err = 0;
1836         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1837
1838         set_state_private(failure_tree, rec->start, 0);
1839         ret = clear_extent_bits(failure_tree, rec->start,
1840                                 rec->start + rec->len - 1,
1841                                 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1842         if (ret)
1843                 err = ret;
1844
1845         if (did_repair) {
1846                 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1847                                         rec->start + rec->len - 1,
1848                                         EXTENT_DAMAGED, GFP_NOFS);
1849                 if (ret && !err)
1850                         err = ret;
1851         }
1852
1853         kfree(rec);
1854         return err;
1855 }
1856
1857 static void repair_io_failure_callback(struct bio *bio, int err)
1858 {
1859         complete(bio->bi_private);
1860 }
1861
1862 /*
1863  * this bypasses the standard btrfs submit functions deliberately, as
1864  * the standard behavior is to write all copies in a raid setup. here we only
1865  * want to write the one bad copy. so we do the mapping for ourselves and issue
1866  * submit_bio directly.
1867  * to avoid any synchonization issues, wait for the data after writing, which
1868  * actually prevents the read that triggered the error from finishing.
1869  * currently, there can be no more than two copies of every data bit. thus,
1870  * exactly one rewrite is required.
1871  */
1872 int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
1873                         u64 length, u64 logical, struct page *page,
1874                         int mirror_num)
1875 {
1876         struct bio *bio;
1877         struct btrfs_device *dev;
1878         DECLARE_COMPLETION_ONSTACK(compl);
1879         u64 map_length = 0;
1880         u64 sector;
1881         struct btrfs_bio *bbio = NULL;
1882         int ret;
1883
1884         BUG_ON(!mirror_num);
1885
1886         bio = bio_alloc(GFP_NOFS, 1);
1887         if (!bio)
1888                 return -EIO;
1889         bio->bi_private = &compl;
1890         bio->bi_end_io = repair_io_failure_callback;
1891         bio->bi_size = 0;
1892         map_length = length;
1893
1894         ret = btrfs_map_block(map_tree, WRITE, logical,
1895                               &map_length, &bbio, mirror_num);
1896         if (ret) {
1897                 bio_put(bio);
1898                 return -EIO;
1899         }
1900         BUG_ON(mirror_num != bbio->mirror_num);
1901         sector = bbio->stripes[mirror_num-1].physical >> 9;
1902         bio->bi_sector = sector;
1903         dev = bbio->stripes[mirror_num-1].dev;
1904         kfree(bbio);
1905         if (!dev || !dev->bdev || !dev->writeable) {
1906                 bio_put(bio);
1907                 return -EIO;
1908         }
1909         bio->bi_bdev = dev->bdev;
1910         bio_add_page(bio, page, length, start-page_offset(page));
1911         btrfsic_submit_bio(WRITE_SYNC, bio);
1912         wait_for_completion(&compl);
1913
1914         if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1915                 /* try to remap that extent elsewhere? */
1916                 bio_put(bio);
1917                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
1918                 return -EIO;
1919         }
1920
1921         printk_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
1922                       "(dev %s sector %llu)\n", page->mapping->host->i_ino,
1923                       start, rcu_str_deref(dev->name), sector);
1924
1925         bio_put(bio);
1926         return 0;
1927 }
1928
1929 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
1930                          int mirror_num)
1931 {
1932         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
1933         u64 start = eb->start;
1934         unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
1935         int ret = 0;
1936
1937         for (i = 0; i < num_pages; i++) {
1938                 struct page *p = extent_buffer_page(eb, i);
1939                 ret = repair_io_failure(map_tree, start, PAGE_CACHE_SIZE,
1940                                         start, p, mirror_num);
1941                 if (ret)
1942                         break;
1943                 start += PAGE_CACHE_SIZE;
1944         }
1945
1946         return ret;
1947 }
1948
1949 /*
1950  * each time an IO finishes, we do a fast check in the IO failure tree
1951  * to see if we need to process or clean up an io_failure_record
1952  */
1953 static int clean_io_failure(u64 start, struct page *page)
1954 {
1955         u64 private;
1956         u64 private_failure;
1957         struct io_failure_record *failrec;
1958         struct btrfs_mapping_tree *map_tree;
1959         struct extent_state *state;
1960         int num_copies;
1961         int did_repair = 0;
1962         int ret;
1963         struct inode *inode = page->mapping->host;
1964
1965         private = 0;
1966         ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1967                                 (u64)-1, 1, EXTENT_DIRTY, 0);
1968         if (!ret)
1969                 return 0;
1970
1971         ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
1972                                 &private_failure);
1973         if (ret)
1974                 return 0;
1975
1976         failrec = (struct io_failure_record *)(unsigned long) private_failure;
1977         BUG_ON(!failrec->this_mirror);
1978
1979         if (failrec->in_validation) {
1980                 /* there was no real error, just free the record */
1981                 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
1982                          failrec->start);
1983                 did_repair = 1;
1984                 goto out;
1985         }
1986
1987         spin_lock(&BTRFS_I(inode)->io_tree.lock);
1988         state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1989                                             failrec->start,
1990                                             EXTENT_LOCKED);
1991         spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1992
1993         if (state && state->start == failrec->start) {
1994                 map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
1995                 num_copies = btrfs_num_copies(map_tree, failrec->logical,
1996                                                 failrec->len);
1997                 if (num_copies > 1)  {
1998                         ret = repair_io_failure(map_tree, start, failrec->len,
1999                                                 failrec->logical, page,
2000                                                 failrec->failed_mirror);
2001                         did_repair = !ret;
2002                 }
2003         }
2004
2005 out:
2006         if (!ret)
2007                 ret = free_io_failure(inode, failrec, did_repair);
2008
2009         return ret;
2010 }
2011
2012 /*
2013  * this is a generic handler for readpage errors (default
2014  * readpage_io_failed_hook). if other copies exist, read those and write back
2015  * good data to the failed position. does not investigate in remapping the
2016  * failed extent elsewhere, hoping the device will be smart enough to do this as
2017  * needed
2018  */
2019
2020 static int bio_readpage_error(struct bio *failed_bio, struct page *page,
2021                                 u64 start, u64 end, int failed_mirror,
2022                                 struct extent_state *state)
2023 {
2024         struct io_failure_record *failrec = NULL;
2025         u64 private;
2026         struct extent_map *em;
2027         struct inode *inode = page->mapping->host;
2028         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2029         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2030         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2031         struct bio *bio;
2032         int num_copies;
2033         int ret;
2034         int read_mode;
2035         u64 logical;
2036
2037         BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2038
2039         ret = get_state_private(failure_tree, start, &private);
2040         if (ret) {
2041                 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2042                 if (!failrec)
2043                         return -ENOMEM;
2044                 failrec->start = start;
2045                 failrec->len = end - start + 1;
2046                 failrec->this_mirror = 0;
2047                 failrec->bio_flags = 0;
2048                 failrec->in_validation = 0;
2049
2050                 read_lock(&em_tree->lock);
2051                 em = lookup_extent_mapping(em_tree, start, failrec->len);
2052                 if (!em) {
2053                         read_unlock(&em_tree->lock);
2054                         kfree(failrec);
2055                         return -EIO;
2056                 }
2057
2058                 if (em->start > start || em->start + em->len < start) {
2059                         free_extent_map(em);
2060                         em = NULL;
2061                 }
2062                 read_unlock(&em_tree->lock);
2063
2064                 if (!em || IS_ERR(em)) {
2065                         kfree(failrec);
2066                         return -EIO;
2067                 }
2068                 logical = start - em->start;
2069                 logical = em->block_start + logical;
2070                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2071                         logical = em->block_start;
2072                         failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2073                         extent_set_compress_type(&failrec->bio_flags,
2074                                                  em->compress_type);
2075                 }
2076                 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2077                          "len=%llu\n", logical, start, failrec->len);
2078                 failrec->logical = logical;
2079                 free_extent_map(em);
2080
2081                 /* set the bits in the private failure tree */
2082                 ret = set_extent_bits(failure_tree, start, end,
2083                                         EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2084                 if (ret >= 0)
2085                         ret = set_state_private(failure_tree, start,
2086                                                 (u64)(unsigned long)failrec);
2087                 /* set the bits in the inode's tree */
2088                 if (ret >= 0)
2089                         ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2090                                                 GFP_NOFS);
2091                 if (ret < 0) {
2092                         kfree(failrec);
2093                         return ret;
2094                 }
2095         } else {
2096                 failrec = (struct io_failure_record *)(unsigned long)private;
2097                 pr_debug("bio_readpage_error: (found) logical=%llu, "
2098                          "start=%llu, len=%llu, validation=%d\n",
2099                          failrec->logical, failrec->start, failrec->len,
2100                          failrec->in_validation);
2101                 /*
2102                  * when data can be on disk more than twice, add to failrec here
2103                  * (e.g. with a list for failed_mirror) to make
2104                  * clean_io_failure() clean all those errors at once.
2105                  */
2106         }
2107         num_copies = btrfs_num_copies(
2108                               &BTRFS_I(inode)->root->fs_info->mapping_tree,
2109                               failrec->logical, failrec->len);
2110         if (num_copies == 1) {
2111                 /*
2112                  * we only have a single copy of the data, so don't bother with
2113                  * all the retry and error correction code that follows. no
2114                  * matter what the error is, it is very likely to persist.
2115                  */
2116                 pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
2117                          "state=%p, num_copies=%d, next_mirror %d, "
2118                          "failed_mirror %d\n", state, num_copies,
2119                          failrec->this_mirror, failed_mirror);
2120                 free_io_failure(inode, failrec, 0);
2121                 return -EIO;
2122         }
2123
2124         if (!state) {
2125                 spin_lock(&tree->lock);
2126                 state = find_first_extent_bit_state(tree, failrec->start,
2127                                                     EXTENT_LOCKED);
2128                 if (state && state->start != failrec->start)
2129                         state = NULL;
2130                 spin_unlock(&tree->lock);
2131         }
2132
2133         /*
2134          * there are two premises:
2135          *      a) deliver good data to the caller
2136          *      b) correct the bad sectors on disk
2137          */
2138         if (failed_bio->bi_vcnt > 1) {
2139                 /*
2140                  * to fulfill b), we need to know the exact failing sectors, as
2141                  * we don't want to rewrite any more than the failed ones. thus,
2142                  * we need separate read requests for the failed bio
2143                  *
2144                  * if the following BUG_ON triggers, our validation request got
2145                  * merged. we need separate requests for our algorithm to work.
2146                  */
2147                 BUG_ON(failrec->in_validation);
2148                 failrec->in_validation = 1;
2149                 failrec->this_mirror = failed_mirror;
2150                 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2151         } else {
2152                 /*
2153                  * we're ready to fulfill a) and b) alongside. get a good copy
2154                  * of the failed sector and if we succeed, we have setup
2155                  * everything for repair_io_failure to do the rest for us.
2156                  */
2157                 if (failrec->in_validation) {
2158                         BUG_ON(failrec->this_mirror != failed_mirror);
2159                         failrec->in_validation = 0;
2160                         failrec->this_mirror = 0;
2161                 }
2162                 failrec->failed_mirror = failed_mirror;
2163                 failrec->this_mirror++;
2164                 if (failrec->this_mirror == failed_mirror)
2165                         failrec->this_mirror++;
2166                 read_mode = READ_SYNC;
2167         }
2168
2169         if (!state || failrec->this_mirror > num_copies) {
2170                 pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
2171                          "next_mirror %d, failed_mirror %d\n", state,
2172                          num_copies, failrec->this_mirror, failed_mirror);
2173                 free_io_failure(inode, failrec, 0);
2174                 return -EIO;
2175         }
2176
2177         bio = bio_alloc(GFP_NOFS, 1);
2178         if (!bio) {
2179                 free_io_failure(inode, failrec, 0);
2180                 return -EIO;
2181         }
2182         bio->bi_private = state;
2183         bio->bi_end_io = failed_bio->bi_end_io;
2184         bio->bi_sector = failrec->logical >> 9;
2185         bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2186         bio->bi_size = 0;
2187
2188         bio_add_page(bio, page, failrec->len, start - page_offset(page));
2189
2190         pr_debug("bio_readpage_error: submitting new read[%#x] to "
2191                  "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2192                  failrec->this_mirror, num_copies, failrec->in_validation);
2193
2194         ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2195                                          failrec->this_mirror,
2196                                          failrec->bio_flags, 0);
2197         return ret;
2198 }
2199
2200 /* lots and lots of room for performance fixes in the end_bio funcs */
2201
2202 int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2203 {
2204         int uptodate = (err == 0);
2205         struct extent_io_tree *tree;
2206         int ret;
2207
2208         tree = &BTRFS_I(page->mapping->host)->io_tree;
2209
2210         if (tree->ops && tree->ops->writepage_end_io_hook) {
2211                 ret = tree->ops->writepage_end_io_hook(page, start,
2212                                                end, NULL, uptodate);
2213                 if (ret)
2214                         uptodate = 0;
2215         }
2216
2217         if (!uptodate) {
2218                 ClearPageUptodate(page);
2219                 SetPageError(page);
2220         }
2221         return 0;
2222 }
2223
2224 /*
2225  * after a writepage IO is done, we need to:
2226  * clear the uptodate bits on error
2227  * clear the writeback bits in the extent tree for this IO
2228  * end_page_writeback if the page has no more pending IO
2229  *
2230  * Scheduling is not allowed, so the extent state tree is expected
2231  * to have one and only one object corresponding to this IO.
2232  */
2233 static void end_bio_extent_writepage(struct bio *bio, int err)
2234 {
2235         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2236         struct extent_io_tree *tree;
2237         u64 start;
2238         u64 end;
2239         int whole_page;
2240
2241         do {
2242                 struct page *page = bvec->bv_page;
2243                 tree = &BTRFS_I(page->mapping->host)->io_tree;
2244
2245                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2246                          bvec->bv_offset;
2247                 end = start + bvec->bv_len - 1;
2248
2249                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2250                         whole_page = 1;
2251                 else
2252                         whole_page = 0;
2253
2254                 if (--bvec >= bio->bi_io_vec)
2255                         prefetchw(&bvec->bv_page->flags);
2256
2257                 if (end_extent_writepage(page, err, start, end))
2258                         continue;
2259
2260                 if (whole_page)
2261                         end_page_writeback(page);
2262                 else
2263                         check_page_writeback(tree, page);
2264         } while (bvec >= bio->bi_io_vec);
2265
2266         bio_put(bio);
2267 }
2268
2269 /*
2270  * after a readpage IO is done, we need to:
2271  * clear the uptodate bits on error
2272  * set the uptodate bits if things worked
2273  * set the page up to date if all extents in the tree are uptodate
2274  * clear the lock bit in the extent tree
2275  * unlock the page if there are no other extents locked for it
2276  *
2277  * Scheduling is not allowed, so the extent state tree is expected
2278  * to have one and only one object corresponding to this IO.
2279  */
2280 static void end_bio_extent_readpage(struct bio *bio, int err)
2281 {
2282         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2283         struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2284         struct bio_vec *bvec = bio->bi_io_vec;
2285         struct extent_io_tree *tree;
2286         u64 start;
2287         u64 end;
2288         int whole_page;
2289         int mirror;
2290         int ret;
2291
2292         if (err)
2293                 uptodate = 0;
2294
2295         do {
2296                 struct page *page = bvec->bv_page;
2297                 struct extent_state *cached = NULL;
2298                 struct extent_state *state;
2299
2300                 pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
2301                          "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
2302                          (long int)bio->bi_bdev);
2303                 tree = &BTRFS_I(page->mapping->host)->io_tree;
2304
2305                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2306                         bvec->bv_offset;
2307                 end = start + bvec->bv_len - 1;
2308
2309                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2310                         whole_page = 1;
2311                 else
2312                         whole_page = 0;
2313
2314                 if (++bvec <= bvec_end)
2315                         prefetchw(&bvec->bv_page->flags);
2316
2317                 spin_lock(&tree->lock);
2318                 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
2319                 if (state && state->start == start) {
2320                         /*
2321                          * take a reference on the state, unlock will drop
2322                          * the ref
2323                          */
2324                         cache_state(state, &cached);
2325                 }
2326                 spin_unlock(&tree->lock);
2327
2328                 mirror = (int)(unsigned long)bio->bi_bdev;
2329                 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
2330                         ret = tree->ops->readpage_end_io_hook(page, start, end,
2331                                                               state, mirror);
2332                         if (ret) {
2333                                 /* no IO indicated but software detected errors
2334                                  * in the block, either checksum errors or
2335                                  * issues with the contents */
2336                                 struct btrfs_root *root =
2337                                         BTRFS_I(page->mapping->host)->root;
2338                                 struct btrfs_device *device;
2339
2340                                 uptodate = 0;
2341                                 device = btrfs_find_device_for_logical(
2342                                                 root, start, mirror);
2343                                 if (device)
2344                                         btrfs_dev_stat_inc_and_print(device,
2345                                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2346                         } else {
2347                                 clean_io_failure(start, page);
2348                         }
2349                 }
2350
2351                 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
2352                         ret = tree->ops->readpage_io_failed_hook(page, mirror);
2353                         if (!ret && !err &&
2354                             test_bit(BIO_UPTODATE, &bio->bi_flags))
2355                                 uptodate = 1;
2356                 } else if (!uptodate) {
2357                         /*
2358                          * The generic bio_readpage_error handles errors the
2359                          * following way: If possible, new read requests are
2360                          * created and submitted and will end up in
2361                          * end_bio_extent_readpage as well (if we're lucky, not
2362                          * in the !uptodate case). In that case it returns 0 and
2363                          * we just go on with the next page in our bio. If it
2364                          * can't handle the error it will return -EIO and we
2365                          * remain responsible for that page.
2366                          */
2367                         ret = bio_readpage_error(bio, page, start, end, mirror, NULL);
2368                         if (ret == 0) {
2369                                 uptodate =
2370                                         test_bit(BIO_UPTODATE, &bio->bi_flags);
2371                                 if (err)
2372                                         uptodate = 0;
2373                                 uncache_state(&cached);
2374                                 continue;
2375                         }
2376                 }
2377
2378                 if (uptodate && tree->track_uptodate) {
2379                         set_extent_uptodate(tree, start, end, &cached,
2380                                             GFP_ATOMIC);
2381                 }
2382                 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2383
2384                 if (whole_page) {
2385                         if (uptodate) {
2386                                 SetPageUptodate(page);
2387                         } else {
2388                                 ClearPageUptodate(page);
2389                                 SetPageError(page);
2390                         }
2391                         unlock_page(page);
2392                 } else {
2393                         if (uptodate) {
2394                                 check_page_uptodate(tree, page);
2395                         } else {
2396                                 ClearPageUptodate(page);
2397                                 SetPageError(page);
2398                         }
2399                         check_page_locked(tree, page);
2400                 }
2401         } while (bvec <= bvec_end);
2402
2403         bio_put(bio);
2404 }
2405
2406 struct bio *
2407 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2408                 gfp_t gfp_flags)
2409 {
2410         struct bio *bio;
2411
2412         bio = bio_alloc(gfp_flags, nr_vecs);
2413
2414         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2415                 while (!bio && (nr_vecs /= 2))
2416                         bio = bio_alloc(gfp_flags, nr_vecs);
2417         }
2418
2419         if (bio) {
2420                 bio->bi_size = 0;
2421                 bio->bi_bdev = bdev;
2422                 bio->bi_sector = first_sector;
2423         }
2424         return bio;
2425 }
2426
2427 /*
2428  * Since writes are async, they will only return -ENOMEM.
2429  * Reads can return the full range of I/O error conditions.
2430  */
2431 static int __must_check submit_one_bio(int rw, struct bio *bio,
2432                                        int mirror_num, unsigned long bio_flags)
2433 {
2434         int ret = 0;
2435         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2436         struct page *page = bvec->bv_page;
2437         struct extent_io_tree *tree = bio->bi_private;
2438         u64 start;
2439
2440         start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
2441
2442         bio->bi_private = NULL;
2443
2444         bio_get(bio);
2445
2446         if (tree->ops && tree->ops->submit_bio_hook)
2447                 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2448                                            mirror_num, bio_flags, start);
2449         else
2450                 btrfsic_submit_bio(rw, bio);
2451
2452         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2453                 ret = -EOPNOTSUPP;
2454         bio_put(bio);
2455         return ret;
2456 }
2457
2458 static int merge_bio(struct extent_io_tree *tree, struct page *page,
2459                      unsigned long offset, size_t size, struct bio *bio,
2460                      unsigned long bio_flags)
2461 {
2462         int ret = 0;
2463         if (tree->ops && tree->ops->merge_bio_hook)
2464                 ret = tree->ops->merge_bio_hook(page, offset, size, bio,
2465                                                 bio_flags);
2466         BUG_ON(ret < 0);
2467         return ret;
2468
2469 }
2470
2471 static int submit_extent_page(int rw, struct extent_io_tree *tree,
2472                               struct page *page, sector_t sector,
2473                               size_t size, unsigned long offset,
2474                               struct block_device *bdev,
2475                               struct bio **bio_ret,
2476                               unsigned long max_pages,
2477                               bio_end_io_t end_io_func,
2478                               int mirror_num,
2479                               unsigned long prev_bio_flags,
2480                               unsigned long bio_flags)
2481 {
2482         int ret = 0;
2483         struct bio *bio;
2484         int nr;
2485         int contig = 0;
2486         int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2487         int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2488         size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
2489
2490         if (bio_ret && *bio_ret) {
2491                 bio = *bio_ret;
2492                 if (old_compressed)
2493                         contig = bio->bi_sector == sector;
2494                 else
2495                         contig = bio->bi_sector + (bio->bi_size >> 9) ==
2496                                 sector;
2497
2498                 if (prev_bio_flags != bio_flags || !contig ||
2499                     merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
2500                     bio_add_page(bio, page, page_size, offset) < page_size) {
2501                         ret = submit_one_bio(rw, bio, mirror_num,
2502                                              prev_bio_flags);
2503                         if (ret < 0)
2504                                 return ret;
2505                         bio = NULL;
2506                 } else {
2507                         return 0;
2508                 }
2509         }
2510         if (this_compressed)
2511                 nr = BIO_MAX_PAGES;
2512         else
2513                 nr = bio_get_nr_vecs(bdev);
2514
2515         bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
2516         if (!bio)
2517                 return -ENOMEM;
2518
2519         bio_add_page(bio, page, page_size, offset);
2520         bio->bi_end_io = end_io_func;
2521         bio->bi_private = tree;
2522
2523         if (bio_ret)
2524                 *bio_ret = bio;
2525         else
2526                 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2527
2528         return ret;
2529 }
2530
2531 void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
2532 {
2533         if (!PagePrivate(page)) {
2534                 SetPagePrivate(page);
2535                 page_cache_get(page);
2536                 set_page_private(page, (unsigned long)eb);
2537         } else {
2538                 WARN_ON(page->private != (unsigned long)eb);
2539         }
2540 }
2541
2542 void set_page_extent_mapped(struct page *page)
2543 {
2544         if (!PagePrivate(page)) {
2545                 SetPagePrivate(page);
2546                 page_cache_get(page);
2547                 set_page_private(page, EXTENT_PAGE_PRIVATE);
2548         }
2549 }
2550
2551 /*
2552  * basic readpage implementation.  Locked extent state structs are inserted
2553  * into the tree that are removed when the IO is done (by the end_io
2554  * handlers)
2555  * XXX JDM: This needs looking at to ensure proper page locking
2556  */
2557 static int __extent_read_full_page(struct extent_io_tree *tree,
2558                                    struct page *page,
2559                                    get_extent_t *get_extent,
2560                                    struct bio **bio, int mirror_num,
2561                                    unsigned long *bio_flags)
2562 {
2563         struct inode *inode = page->mapping->host;
2564         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2565         u64 page_end = start + PAGE_CACHE_SIZE - 1;
2566         u64 end;
2567         u64 cur = start;
2568         u64 extent_offset;
2569         u64 last_byte = i_size_read(inode);
2570         u64 block_start;
2571         u64 cur_end;
2572         sector_t sector;
2573         struct extent_map *em;
2574         struct block_device *bdev;
2575         struct btrfs_ordered_extent *ordered;
2576         int ret;
2577         int nr = 0;
2578         size_t pg_offset = 0;
2579         size_t iosize;
2580         size_t disk_io_size;
2581         size_t blocksize = inode->i_sb->s_blocksize;
2582         unsigned long this_bio_flag = 0;
2583
2584         set_page_extent_mapped(page);
2585
2586         if (!PageUptodate(page)) {
2587                 if (cleancache_get_page(page) == 0) {
2588                         BUG_ON(blocksize != PAGE_SIZE);
2589                         goto out;
2590                 }
2591         }
2592
2593         end = page_end;
2594         while (1) {
2595                 lock_extent(tree, start, end);
2596                 ordered = btrfs_lookup_ordered_extent(inode, start);
2597                 if (!ordered)
2598                         break;
2599                 unlock_extent(tree, start, end);
2600                 btrfs_start_ordered_extent(inode, ordered, 1);
2601                 btrfs_put_ordered_extent(ordered);
2602         }
2603
2604         if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2605                 char *userpage;
2606                 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2607
2608                 if (zero_offset) {
2609                         iosize = PAGE_CACHE_SIZE - zero_offset;
2610                         userpage = kmap_atomic(page);
2611                         memset(userpage + zero_offset, 0, iosize);
2612                         flush_dcache_page(page);
2613                         kunmap_atomic(userpage);
2614                 }
2615         }
2616         while (cur <= end) {
2617                 if (cur >= last_byte) {
2618                         char *userpage;
2619                         struct extent_state *cached = NULL;
2620
2621                         iosize = PAGE_CACHE_SIZE - pg_offset;
2622                         userpage = kmap_atomic(page);
2623                         memset(userpage + pg_offset, 0, iosize);
2624                         flush_dcache_page(page);
2625                         kunmap_atomic(userpage);
2626                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2627                                             &cached, GFP_NOFS);
2628                         unlock_extent_cached(tree, cur, cur + iosize - 1,
2629                                              &cached, GFP_NOFS);
2630                         break;
2631                 }
2632                 em = get_extent(inode, page, pg_offset, cur,
2633                                 end - cur + 1, 0);
2634                 if (IS_ERR_OR_NULL(em)) {
2635                         SetPageError(page);
2636                         unlock_extent(tree, cur, end);
2637                         break;
2638                 }
2639                 extent_offset = cur - em->start;
2640                 BUG_ON(extent_map_end(em) <= cur);
2641                 BUG_ON(end < cur);
2642
2643                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2644                         this_bio_flag = EXTENT_BIO_COMPRESSED;
2645                         extent_set_compress_type(&this_bio_flag,
2646                                                  em->compress_type);
2647                 }
2648
2649                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2650                 cur_end = min(extent_map_end(em) - 1, end);
2651                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2652                 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2653                         disk_io_size = em->block_len;
2654                         sector = em->block_start >> 9;
2655                 } else {
2656                         sector = (em->block_start + extent_offset) >> 9;
2657                         disk_io_size = iosize;
2658                 }
2659                 bdev = em->bdev;
2660                 block_start = em->block_start;
2661                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2662                         block_start = EXTENT_MAP_HOLE;
2663                 free_extent_map(em);
2664                 em = NULL;
2665
2666                 /* we've found a hole, just zero and go on */
2667                 if (block_start == EXTENT_MAP_HOLE) {
2668                         char *userpage;
2669                         struct extent_state *cached = NULL;
2670
2671                         userpage = kmap_atomic(page);
2672                         memset(userpage + pg_offset, 0, iosize);
2673                         flush_dcache_page(page);
2674                         kunmap_atomic(userpage);
2675
2676                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2677                                             &cached, GFP_NOFS);
2678                         unlock_extent_cached(tree, cur, cur + iosize - 1,
2679                                              &cached, GFP_NOFS);
2680                         cur = cur + iosize;
2681                         pg_offset += iosize;
2682                         continue;
2683                 }
2684                 /* the get_extent function already copied into the page */
2685                 if (test_range_bit(tree, cur, cur_end,
2686                                    EXTENT_UPTODATE, 1, NULL)) {
2687                         check_page_uptodate(tree, page);
2688                         unlock_extent(tree, cur, cur + iosize - 1);
2689                         cur = cur + iosize;
2690                         pg_offset += iosize;
2691                         continue;
2692                 }
2693                 /* we have an inline extent but it didn't get marked up
2694                  * to date.  Error out
2695                  */
2696                 if (block_start == EXTENT_MAP_INLINE) {
2697                         SetPageError(page);
2698                         unlock_extent(tree, cur, cur + iosize - 1);
2699                         cur = cur + iosize;
2700                         pg_offset += iosize;
2701                         continue;
2702                 }
2703
2704                 ret = 0;
2705                 if (tree->ops && tree->ops->readpage_io_hook) {
2706                         ret = tree->ops->readpage_io_hook(page, cur,
2707                                                           cur + iosize - 1);
2708                 }
2709                 if (!ret) {
2710                         unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2711                         pnr -= page->index;
2712                         ret = submit_extent_page(READ, tree, page,
2713                                          sector, disk_io_size, pg_offset,
2714                                          bdev, bio, pnr,
2715                                          end_bio_extent_readpage, mirror_num,
2716                                          *bio_flags,
2717                                          this_bio_flag);
2718                         BUG_ON(ret == -ENOMEM);
2719                         nr++;
2720                         *bio_flags = this_bio_flag;
2721                 }
2722                 if (ret)
2723                         SetPageError(page);
2724                 cur = cur + iosize;
2725                 pg_offset += iosize;
2726         }
2727 out:
2728         if (!nr) {
2729                 if (!PageError(page))
2730                         SetPageUptodate(page);
2731                 unlock_page(page);
2732         }
2733         return 0;
2734 }
2735
2736 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2737                             get_extent_t *get_extent, int mirror_num)
2738 {
2739         struct bio *bio = NULL;
2740         unsigned long bio_flags = 0;
2741         int ret;
2742
2743         ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
2744                                       &bio_flags);
2745         if (bio)
2746                 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
2747         return ret;
2748 }
2749
2750 static noinline void update_nr_written(struct page *page,
2751                                       struct writeback_control *wbc,
2752                                       unsigned long nr_written)
2753 {
2754         wbc->nr_to_write -= nr_written;
2755         if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2756             wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2757                 page->mapping->writeback_index = page->index + nr_written;
2758 }
2759
2760 /*
2761  * the writepage semantics are similar to regular writepage.  extent
2762  * records are inserted to lock ranges in the tree, and as dirty areas
2763  * are found, they are marked writeback.  Then the lock bits are removed
2764  * and the end_io handler clears the writeback ranges
2765  */
2766 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2767                               void *data)
2768 {
2769         struct inode *inode = page->mapping->host;
2770         struct extent_page_data *epd = data;
2771         struct extent_io_tree *tree = epd->tree;
2772         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2773         u64 delalloc_start;
2774         u64 page_end = start + PAGE_CACHE_SIZE - 1;
2775         u64 end;
2776         u64 cur = start;
2777         u64 extent_offset;
2778         u64 last_byte = i_size_read(inode);
2779         u64 block_start;
2780         u64 iosize;
2781         sector_t sector;
2782         struct extent_state *cached_state = NULL;
2783         struct extent_map *em;
2784         struct block_device *bdev;
2785         int ret;
2786         int nr = 0;
2787         size_t pg_offset = 0;
2788         size_t blocksize;
2789         loff_t i_size = i_size_read(inode);
2790         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2791         u64 nr_delalloc;
2792         u64 delalloc_end;
2793         int page_started;
2794         int compressed;
2795         int write_flags;
2796         unsigned long nr_written = 0;
2797         bool fill_delalloc = true;
2798
2799         if (wbc->sync_mode == WB_SYNC_ALL)
2800                 write_flags = WRITE_SYNC;
2801         else
2802                 write_flags = WRITE;
2803
2804         trace___extent_writepage(page, inode, wbc);
2805
2806         WARN_ON(!PageLocked(page));
2807
2808         ClearPageError(page);
2809
2810         pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2811         if (page->index > end_index ||
2812            (page->index == end_index && !pg_offset)) {
2813                 page->mapping->a_ops->invalidatepage(page, 0);
2814                 unlock_page(page);
2815                 return 0;
2816         }
2817
2818         if (page->index == end_index) {
2819                 char *userpage;
2820
2821                 userpage = kmap_atomic(page);
2822                 memset(userpage + pg_offset, 0,
2823                        PAGE_CACHE_SIZE - pg_offset);
2824                 kunmap_atomic(userpage);
2825                 flush_dcache_page(page);
2826         }
2827         pg_offset = 0;
2828
2829         set_page_extent_mapped(page);
2830
2831         if (!tree->ops || !tree->ops->fill_delalloc)
2832                 fill_delalloc = false;
2833
2834         delalloc_start = start;
2835         delalloc_end = 0;
2836         page_started = 0;
2837         if (!epd->extent_locked && fill_delalloc) {
2838                 u64 delalloc_to_write = 0;
2839                 /*
2840                  * make sure the wbc mapping index is at least updated
2841                  * to this page.
2842                  */
2843                 update_nr_written(page, wbc, 0);
2844
2845                 while (delalloc_end < page_end) {
2846                         nr_delalloc = find_lock_delalloc_range(inode, tree,
2847                                                        page,
2848                                                        &delalloc_start,
2849                                                        &delalloc_end,
2850                                                        128 * 1024 * 1024);
2851                         if (nr_delalloc == 0) {
2852                                 delalloc_start = delalloc_end + 1;
2853                                 continue;
2854                         }
2855                         ret = tree->ops->fill_delalloc(inode, page,
2856                                                        delalloc_start,
2857                                                        delalloc_end,
2858                                                        &page_started,
2859                                                        &nr_written);
2860                         /* File system has been set read-only */
2861                         if (ret) {
2862                                 SetPageError(page);
2863                                 goto done;
2864                         }
2865                         /*
2866                          * delalloc_end is already one less than the total
2867                          * length, so we don't subtract one from
2868                          * PAGE_CACHE_SIZE
2869                          */
2870                         delalloc_to_write += (delalloc_end - delalloc_start +
2871                                               PAGE_CACHE_SIZE) >>
2872                                               PAGE_CACHE_SHIFT;
2873                         delalloc_start = delalloc_end + 1;
2874                 }
2875                 if (wbc->nr_to_write < delalloc_to_write) {
2876                         int thresh = 8192;
2877
2878                         if (delalloc_to_write < thresh * 2)
2879                                 thresh = delalloc_to_write;
2880                         wbc->nr_to_write = min_t(u64, delalloc_to_write,
2881                                                  thresh);
2882                 }
2883
2884                 /* did the fill delalloc function already unlock and start
2885                  * the IO?
2886                  */
2887                 if (page_started) {
2888                         ret = 0;
2889                         /*
2890                          * we've unlocked the page, so we can't update
2891                          * the mapping's writeback index, just update
2892                          * nr_to_write.
2893                          */
2894                         wbc->nr_to_write -= nr_written;
2895                         goto done_unlocked;
2896                 }
2897         }
2898         if (tree->ops && tree->ops->writepage_start_hook) {
2899                 ret = tree->ops->writepage_start_hook(page, start,
2900                                                       page_end);
2901                 if (ret) {
2902                         /* Fixup worker will requeue */
2903                         if (ret == -EBUSY)
2904                                 wbc->pages_skipped++;
2905                         else
2906                                 redirty_page_for_writepage(wbc, page);
2907                         update_nr_written(page, wbc, nr_written);
2908                         unlock_page(page);
2909                         ret = 0;
2910                         goto done_unlocked;
2911                 }
2912         }
2913
2914         /*
2915          * we don't want to touch the inode after unlocking the page,
2916          * so we update the mapping writeback index now
2917          */
2918         update_nr_written(page, wbc, nr_written + 1);
2919
2920         end = page_end;
2921         if (last_byte <= start) {
2922                 if (tree->ops && tree->ops->writepage_end_io_hook)
2923                         tree->ops->writepage_end_io_hook(page, start,
2924                                                          page_end, NULL, 1);
2925                 goto done;
2926         }
2927
2928         blocksize = inode->i_sb->s_blocksize;
2929
2930         while (cur <= end) {
2931                 if (cur >= last_byte) {
2932                         if (tree->ops && tree->ops->writepage_end_io_hook)
2933                                 tree->ops->writepage_end_io_hook(page, cur,
2934                                                          page_end, NULL, 1);
2935                         break;
2936                 }
2937                 em = epd->get_extent(inode, page, pg_offset, cur,
2938                                      end - cur + 1, 1);
2939                 if (IS_ERR_OR_NULL(em)) {
2940                         SetPageError(page);
2941                         break;
2942                 }
2943
2944                 extent_offset = cur - em->start;
2945                 BUG_ON(extent_map_end(em) <= cur);
2946                 BUG_ON(end < cur);
2947                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2948                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2949                 sector = (em->block_start + extent_offset) >> 9;
2950                 bdev = em->bdev;
2951                 block_start = em->block_start;
2952                 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2953                 free_extent_map(em);
2954                 em = NULL;
2955
2956                 /*
2957                  * compressed and inline extents are written through other
2958                  * paths in the FS
2959                  */
2960                 if (compressed || block_start == EXTENT_MAP_HOLE ||
2961                     block_start == EXTENT_MAP_INLINE) {
2962                         /*
2963                          * end_io notification does not happen here for
2964                          * compressed extents
2965                          */
2966                         if (!compressed && tree->ops &&
2967                             tree->ops->writepage_end_io_hook)
2968                                 tree->ops->writepage_end_io_hook(page, cur,
2969                                                          cur + iosize - 1,
2970                                                          NULL, 1);
2971                         else if (compressed) {
2972                                 /* we don't want to end_page_writeback on
2973                                  * a compressed extent.  this happens
2974                                  * elsewhere
2975                                  */
2976                                 nr++;
2977                         }
2978
2979                         cur += iosize;
2980                         pg_offset += iosize;
2981                         continue;
2982                 }
2983                 /* leave this out until we have a page_mkwrite call */
2984                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2985                                    EXTENT_DIRTY, 0, NULL)) {
2986                         cur = cur + iosize;
2987                         pg_offset += iosize;
2988                         continue;
2989                 }
2990
2991                 if (tree->ops && tree->ops->writepage_io_hook) {
2992                         ret = tree->ops->writepage_io_hook(page, cur,
2993                                                 cur + iosize - 1);
2994                 } else {
2995                         ret = 0;
2996                 }
2997                 if (ret) {
2998                         SetPageError(page);
2999                 } else {
3000                         unsigned long max_nr = end_index + 1;
3001
3002                         set_range_writeback(tree, cur, cur + iosize - 1);
3003                         if (!PageWriteback(page)) {
3004                                 printk(KERN_ERR "btrfs warning page %lu not "
3005                                        "writeback, cur %llu end %llu\n",
3006                                        page->index, (unsigned long long)cur,
3007                                        (unsigned long long)end);
3008                         }
3009
3010                         ret = submit_extent_page(write_flags, tree, page,
3011                                                  sector, iosize, pg_offset,
3012                                                  bdev, &epd->bio, max_nr,
3013                                                  end_bio_extent_writepage,
3014                                                  0, 0, 0);
3015                         if (ret)
3016                                 SetPageError(page);
3017                 }
3018                 cur = cur + iosize;
3019                 pg_offset += iosize;
3020                 nr++;
3021         }
3022 done:
3023         if (nr == 0) {
3024                 /* make sure the mapping tag for page dirty gets cleared */
3025                 set_page_writeback(page);
3026                 end_page_writeback(page);
3027         }
3028         unlock_page(page);
3029
3030 done_unlocked:
3031
3032         /* drop our reference on any cached states */
3033         free_extent_state(cached_state);
3034         return 0;
3035 }
3036
3037 static int eb_wait(void *word)
3038 {
3039         io_schedule();
3040         return 0;
3041 }
3042
3043 static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3044 {
3045         wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3046                     TASK_UNINTERRUPTIBLE);
3047 }
3048
3049 static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3050                                      struct btrfs_fs_info *fs_info,
3051                                      struct extent_page_data *epd)
3052 {
3053         unsigned long i, num_pages;
3054         int flush = 0;
3055         int ret = 0;
3056
3057         if (!btrfs_try_tree_write_lock(eb)) {
3058                 flush = 1;
3059                 flush_write_bio(epd);
3060                 btrfs_tree_lock(eb);
3061         }
3062
3063         if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3064                 btrfs_tree_unlock(eb);
3065                 if (!epd->sync_io)
3066                         return 0;
3067                 if (!flush) {
3068                         flush_write_bio(epd);
3069                         flush = 1;
3070                 }
3071                 while (1) {
3072                         wait_on_extent_buffer_writeback(eb);
3073                         btrfs_tree_lock(eb);
3074                         if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3075                                 break;
3076                         btrfs_tree_unlock(eb);
3077                 }
3078         }
3079
3080         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3081                 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3082                 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3083                 spin_lock(&fs_info->delalloc_lock);
3084                 if (fs_info->dirty_metadata_bytes >= eb->len)
3085                         fs_info->dirty_metadata_bytes -= eb->len;
3086                 else
3087                         WARN_ON(1);
3088                 spin_unlock(&fs_info->delalloc_lock);
3089                 ret = 1;
3090         }
3091
3092         btrfs_tree_unlock(eb);
3093
3094         if (!ret)
3095                 return ret;
3096
3097         num_pages = num_extent_pages(eb->start, eb->len);
3098         for (i = 0; i < num_pages; i++) {
3099                 struct page *p = extent_buffer_page(eb, i);
3100
3101                 if (!trylock_page(p)) {
3102                         if (!flush) {
3103                                 flush_write_bio(epd);
3104                                 flush = 1;
3105                         }
3106                         lock_page(p);
3107                 }
3108         }
3109
3110         return ret;
3111 }
3112
3113 static void end_extent_buffer_writeback(struct extent_buffer *eb)
3114 {
3115         clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3116         smp_mb__after_clear_bit();
3117         wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3118 }
3119
3120 static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3121 {
3122         int uptodate = err == 0;
3123         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
3124         struct extent_buffer *eb;
3125         int done;
3126
3127         do {
3128                 struct page *page = bvec->bv_page;
3129
3130                 bvec--;
3131                 eb = (struct extent_buffer *)page->private;
3132                 BUG_ON(!eb);
3133                 done = atomic_dec_and_test(&eb->io_pages);
3134
3135                 if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3136                         set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3137                         ClearPageUptodate(page);
3138                         SetPageError(page);
3139                 }
3140
3141                 end_page_writeback(page);
3142
3143                 if (!done)
3144                         continue;
3145
3146                 end_extent_buffer_writeback(eb);
3147         } while (bvec >= bio->bi_io_vec);
3148
3149         bio_put(bio);
3150
3151 }
3152
3153 static int write_one_eb(struct extent_buffer *eb,
3154                         struct btrfs_fs_info *fs_info,
3155                         struct writeback_control *wbc,
3156                         struct extent_page_data *epd)
3157 {
3158         struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3159         u64 offset = eb->start;
3160         unsigned long i, num_pages;
3161         int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
3162         int ret = 0;
3163
3164         clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3165         num_pages = num_extent_pages(eb->start, eb->len);
3166         atomic_set(&eb->io_pages, num_pages);
3167         for (i = 0; i < num_pages; i++) {
3168                 struct page *p = extent_buffer_page(eb, i);
3169
3170                 clear_page_dirty_for_io(p);
3171                 set_page_writeback(p);
3172                 ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3173                                          PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3174                                          -1, end_bio_extent_buffer_writepage,
3175                                          0, 0, 0);
3176                 if (ret) {
3177                         set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3178                         SetPageError(p);
3179                         if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3180                                 end_extent_buffer_writeback(eb);
3181                         ret = -EIO;
3182                         break;
3183                 }
3184                 offset += PAGE_CACHE_SIZE;
3185                 update_nr_written(p, wbc, 1);
3186                 unlock_page(p);
3187         }
3188
3189         if (unlikely(ret)) {
3190                 for (; i < num_pages; i++) {
3191                         struct page *p = extent_buffer_page(eb, i);
3192                         unlock_page(p);
3193                 }
3194         }
3195
3196         return ret;
3197 }
3198
3199 int btree_write_cache_pages(struct address_space *mapping,
3200                                    struct writeback_control *wbc)
3201 {
3202         struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3203         struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3204         struct extent_buffer *eb, *prev_eb = NULL;
3205         struct extent_page_data epd = {
3206                 .bio = NULL,
3207                 .tree = tree,
3208                 .extent_locked = 0,
3209                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3210         };
3211         int ret = 0;
3212         int done = 0;
3213         int nr_to_write_done = 0;
3214         struct pagevec pvec;
3215         int nr_pages;
3216         pgoff_t index;
3217         pgoff_t end;            /* Inclusive */
3218         int scanned = 0;
3219         int tag;
3220
3221         pagevec_init(&pvec, 0);
3222         if (wbc->range_cyclic) {
3223                 index = mapping->writeback_index; /* Start from prev offset */
3224                 end = -1;
3225         } else {
3226                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3227                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3228                 scanned = 1;
3229         }
3230         if (wbc->sync_mode == WB_SYNC_ALL)
3231                 tag = PAGECACHE_TAG_TOWRITE;
3232         else
3233                 tag = PAGECACHE_TAG_DIRTY;
3234 retry:
3235         if (wbc->sync_mode == WB_SYNC_ALL)
3236                 tag_pages_for_writeback(mapping, index, end);
3237         while (!done && !nr_to_write_done && (index <= end) &&
3238                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3239                         min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3240                 unsigned i;
3241
3242                 scanned = 1;
3243                 for (i = 0; i < nr_pages; i++) {
3244                         struct page *page = pvec.pages[i];
3245
3246                         if (!PagePrivate(page))
3247                                 continue;
3248
3249                         if (!wbc->range_cyclic && page->index > end) {
3250                                 done = 1;
3251                                 break;
3252                         }
3253
3254                         eb = (struct extent_buffer *)page->private;
3255                         if (!eb) {
3256                                 WARN_ON(1);
3257                                 continue;
3258                         }
3259
3260                         if (eb == prev_eb)
3261                                 continue;
3262
3263                         if (!atomic_inc_not_zero(&eb->refs)) {
3264                                 WARN_ON(1);
3265                                 continue;
3266                         }
3267
3268                         prev_eb = eb;
3269                         ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3270                         if (!ret) {
3271                                 free_extent_buffer(eb);
3272                                 continue;
3273                         }
3274
3275                         ret = write_one_eb(eb, fs_info, wbc, &epd);
3276                         if (ret) {
3277                                 done = 1;
3278                                 free_extent_buffer(eb);
3279                                 break;
3280                         }
3281                         free_extent_buffer(eb);
3282
3283                         /*
3284                          * the filesystem may choose to bump up nr_to_write.
3285                          * We have to make sure to honor the new nr_to_write
3286                          * at any time
3287                          */
3288                         nr_to_write_done = wbc->nr_to_write <= 0;
3289                 }
3290                 pagevec_release(&pvec);
3291                 cond_resched();
3292         }
3293         if (!scanned && !done) {
3294                 /*
3295                  * We hit the last page and there is more work to be done: wrap
3296                  * back to the start of the file
3297                  */
3298                 scanned = 1;
3299                 index = 0;
3300                 goto retry;
3301         }
3302         flush_write_bio(&epd);
3303         return ret;
3304 }
3305
3306 /**
3307  * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3308  * @mapping: address space structure to write
3309  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3310  * @writepage: function called for each page
3311  * @data: data passed to writepage function
3312  *
3313  * If a page is already under I/O, write_cache_pages() skips it, even
3314  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
3315  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
3316  * and msync() need to guarantee that all the data which was dirty at the time
3317  * the call was made get new I/O started against them.  If wbc->sync_mode is
3318  * WB_SYNC_ALL then we were called for data integrity and we must wait for
3319  * existing IO to complete.
3320  */
3321 static int extent_write_cache_pages(struct extent_io_tree *tree,
3322                              struct address_space *mapping,
3323                              struct writeback_control *wbc,
3324                              writepage_t writepage, void *data,
3325                              void (*flush_fn)(void *))
3326 {
3327         struct inode *inode = mapping->host;
3328         int ret = 0;
3329         int done = 0;
3330         int nr_to_write_done = 0;
3331         struct pagevec pvec;
3332         int nr_pages;
3333         pgoff_t index;
3334         pgoff_t end;            /* Inclusive */
3335         int scanned = 0;
3336         int tag;
3337
3338         /*
3339          * We have to hold onto the inode so that ordered extents can do their
3340          * work when the IO finishes.  The alternative to this is failing to add
3341          * an ordered extent if the igrab() fails there and that is a huge pain
3342          * to deal with, so instead just hold onto the inode throughout the
3343          * writepages operation.  If it fails here we are freeing up the inode
3344          * anyway and we'd rather not waste our time writing out stuff that is
3345          * going to be truncated anyway.
3346          */
3347         if (!igrab(inode))
3348                 return 0;
3349
3350         pagevec_init(&pvec, 0);
3351         if (wbc->range_cyclic) {
3352                 index = mapping->writeback_index; /* Start from prev offset */
3353                 end = -1;
3354         } else {
3355                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3356                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3357                 scanned = 1;
3358         }
3359         if (wbc->sync_mode == WB_SYNC_ALL)
3360                 tag = PAGECACHE_TAG_TOWRITE;
3361         else
3362                 tag = PAGECACHE_TAG_DIRTY;
3363 retry:
3364         if (wbc->sync_mode == WB_SYNC_ALL)
3365                 tag_pages_for_writeback(mapping, index, end);
3366         while (!done && !nr_to_write_done && (index <= end) &&
3367                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3368                         min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3369                 unsigned i;
3370
3371                 scanned = 1;
3372                 for (i = 0; i < nr_pages; i++) {
3373                         struct page *page = pvec.pages[i];
3374
3375                         /*
3376                          * At this point we hold neither mapping->tree_lock nor
3377                          * lock on the page itself: the page may be truncated or
3378                          * invalidated (changing page->mapping to NULL), or even
3379                          * swizzled back from swapper_space to tmpfs file
3380                          * mapping
3381                          */
3382                         if (tree->ops &&
3383                             tree->ops->write_cache_pages_lock_hook) {
3384                                 tree->ops->write_cache_pages_lock_hook(page,
3385                                                                data, flush_fn);
3386                         } else {
3387                                 if (!trylock_page(page)) {
3388                                         flush_fn(data);
3389                                         lock_page(page);
3390                                 }
3391                         }
3392
3393                         if (unlikely(page->mapping != mapping)) {
3394                                 unlock_page(page);
3395                                 continue;
3396                         }
3397
3398                         if (!wbc->range_cyclic && page->index > end) {
3399                                 done = 1;
3400                                 unlock_page(page);
3401                                 continue;
3402                         }
3403
3404                         if (wbc->sync_mode != WB_SYNC_NONE) {
3405                                 if (PageWriteback(page))
3406                                         flush_fn(data);
3407                                 wait_on_page_writeback(page);
3408                         }
3409
3410                         if (PageWriteback(page) ||
3411                             !clear_page_dirty_for_io(page)) {
3412                                 unlock_page(page);
3413                                 continue;
3414                         }
3415
3416                         ret = (*writepage)(page, wbc, data);
3417
3418                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3419                                 unlock_page(page);
3420                                 ret = 0;
3421                         }
3422                         if (ret)
3423                                 done = 1;
3424
3425                         /*
3426                          * the filesystem may choose to bump up nr_to_write.
3427                          * We have to make sure to honor the new nr_to_write
3428                          * at any time
3429                          */
3430                         nr_to_write_done = wbc->nr_to_write <= 0;
3431                 }
3432                 pagevec_release(&pvec);
3433                 cond_resched();
3434         }
3435         if (!scanned && !done) {
3436                 /*
3437                  * We hit the last page and there is more work to be done: wrap
3438                  * back to the start of the file
3439                  */
3440                 scanned = 1;
3441                 index = 0;
3442                 goto retry;
3443         }
3444         btrfs_add_delayed_iput(inode);
3445         return ret;
3446 }
3447
3448 static void flush_epd_write_bio(struct extent_page_data *epd)
3449 {
3450         if (epd->bio) {
3451                 int rw = WRITE;
3452                 int ret;
3453
3454                 if (epd->sync_io)
3455                         rw = WRITE_SYNC;
3456
3457                 ret = submit_one_bio(rw, epd->bio, 0, 0);
3458                 BUG_ON(ret < 0); /* -ENOMEM */
3459                 epd->bio = NULL;
3460         }
3461 }
3462
3463 static noinline void flush_write_bio(void *data)
3464 {
3465         struct extent_page_data *epd = data;
3466         flush_epd_write_bio(epd);
3467 }
3468
3469 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3470                           get_extent_t *get_extent,
3471                           struct writeback_control *wbc)
3472 {
3473         int ret;
3474         struct extent_page_data epd = {
3475                 .bio = NULL,
3476                 .tree = tree,
3477                 .get_extent = get_extent,
3478                 .extent_locked = 0,
3479                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3480         };
3481
3482         ret = __extent_writepage(page, wbc, &epd);
3483
3484         flush_epd_write_bio(&epd);
3485         return ret;
3486 }
3487
3488 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3489                               u64 start, u64 end, get_extent_t *get_extent,
3490                               int mode)
3491 {
3492         int ret = 0;
3493         struct address_space *mapping = inode->i_mapping;
3494         struct page *page;
3495         unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3496                 PAGE_CACHE_SHIFT;
3497
3498         struct extent_page_data epd = {
3499                 .bio = NULL,
3500                 .tree = tree,
3501                 .get_extent = get_extent,
3502                 .extent_locked = 1,
3503                 .sync_io = mode == WB_SYNC_ALL,
3504         };
3505         struct writeback_control wbc_writepages = {
3506                 .sync_mode      = mode,
3507                 .nr_to_write    = nr_pages * 2,
3508                 .range_start    = start,
3509                 .range_end      = end + 1,
3510         };
3511
3512         while (start <= end) {
3513                 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3514                 if (clear_page_dirty_for_io(page))
3515                         ret = __extent_writepage(page, &wbc_writepages, &epd);
3516                 else {
3517                         if (tree->ops && tree->ops->writepage_end_io_hook)
3518                                 tree->ops->writepage_end_io_hook(page, start,
3519                                                  start + PAGE_CACHE_SIZE - 1,
3520                                                  NULL, 1);
3521                         unlock_page(page);
3522                 }
3523                 page_cache_release(page);
3524                 start += PAGE_CACHE_SIZE;
3525         }
3526
3527         flush_epd_write_bio(&epd);
3528         return ret;
3529 }
3530
3531 int extent_writepages(struct extent_io_tree *tree,
3532                       struct address_space *mapping,
3533                       get_extent_t *get_extent,
3534                       struct writeback_control *wbc)
3535 {
3536         int ret = 0;
3537         struct extent_page_data epd = {
3538                 .bio = NULL,
3539                 .tree = tree,
3540                 .get_extent = get_extent,
3541                 .extent_locked = 0,
3542                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3543         };
3544
3545         ret = extent_write_cache_pages(tree, mapping, wbc,
3546                                        __extent_writepage, &epd,
3547                                        flush_write_bio);
3548         flush_epd_write_bio(&epd);
3549         return ret;
3550 }
3551
3552 int extent_readpages(struct extent_io_tree *tree,
3553                      struct address_space *mapping,
3554                      struct list_head *pages, unsigned nr_pages,
3555                      get_extent_t get_extent)
3556 {
3557         struct bio *bio = NULL;
3558         unsigned page_idx;
3559         unsigned long bio_flags = 0;
3560
3561         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3562                 struct page *page = list_entry(pages->prev, struct page, lru);
3563
3564                 prefetchw(&page->flags);
3565                 list_del(&page->lru);
3566                 if (!add_to_page_cache_lru(page, mapping,
3567                                         page->index, GFP_NOFS)) {
3568                         __extent_read_full_page(tree, page, get_extent,
3569                                                 &bio, 0, &bio_flags);
3570                 }
3571                 page_cache_release(page);
3572         }
3573         BUG_ON(!list_empty(pages));
3574         if (bio)
3575                 return submit_one_bio(READ, bio, 0, bio_flags);
3576         return 0;
3577 }
3578
3579 /*
3580  * basic invalidatepage code, this waits on any locked or writeback
3581  * ranges corresponding to the page, and then deletes any extent state
3582  * records from the tree
3583  */
3584 int extent_invalidatepage(struct extent_io_tree *tree,
3585                           struct page *page, unsigned long offset)
3586 {
3587         struct extent_state *cached_state = NULL;
3588         u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
3589         u64 end = start + PAGE_CACHE_SIZE - 1;
3590         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3591
3592         start += (offset + blocksize - 1) & ~(blocksize - 1);
3593         if (start > end)
3594                 return 0;
3595
3596         lock_extent_bits(tree, start, end, 0, &cached_state);
3597         wait_on_page_writeback(page);
3598         clear_extent_bit(tree, start, end,
3599                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3600                          EXTENT_DO_ACCOUNTING,
3601                          1, 1, &cached_state, GFP_NOFS);
3602         return 0;
3603 }
3604
3605 /*
3606  * a helper for releasepage, this tests for areas of the page that
3607  * are locked or under IO and drops the related state bits if it is safe
3608  * to drop the page.
3609  */
3610 int try_release_extent_state(struct extent_map_tree *map,
3611                              struct extent_io_tree *tree, struct page *page,
3612                              gfp_t mask)
3613 {
3614         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3615         u64 end = start + PAGE_CACHE_SIZE - 1;
3616         int ret = 1;
3617
3618         if (test_range_bit(tree, start, end,
3619                            EXTENT_IOBITS, 0, NULL))
3620                 ret = 0;
3621         else {
3622                 if ((mask & GFP_NOFS) == GFP_NOFS)
3623                         mask = GFP_NOFS;
3624                 /*
3625                  * at this point we can safely clear everything except the
3626                  * locked bit and the nodatasum bit
3627                  */
3628                 ret = clear_extent_bit(tree, start, end,
3629                                  ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3630                                  0, 0, NULL, mask);
3631
3632                 /* if clear_extent_bit failed for enomem reasons,
3633                  * we can't allow the release to continue.
3634                  */
3635                 if (ret < 0)
3636                         ret = 0;
3637                 else
3638                         ret = 1;
3639         }
3640         return ret;
3641 }
3642
3643 /*
3644  * a helper for releasepage.  As long as there are no locked extents
3645  * in the range corresponding to the page, both state records and extent
3646  * map records are removed
3647  */
3648 int try_release_extent_mapping(struct extent_map_tree *map,
3649                                struct extent_io_tree *tree, struct page *page,
3650                                gfp_t mask)
3651 {
3652         struct extent_map *em;
3653         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3654         u64 end = start + PAGE_CACHE_SIZE - 1;
3655
3656         if ((mask & __GFP_WAIT) &&
3657             page->mapping->host->i_size > 16 * 1024 * 1024) {
3658                 u64 len;
3659                 while (start <= end) {
3660                         len = end - start + 1;
3661                         write_lock(&map->lock);
3662                         em = lookup_extent_mapping(map, start, len);
3663                         if (!em) {
3664                                 write_unlock(&map->lock);
3665                                 break;
3666                         }
3667                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3668                             em->start != start) {
3669                                 write_unlock(&map->lock);
3670                                 free_extent_map(em);
3671                                 break;
3672                         }
3673                         if (!test_range_bit(tree, em->start,
3674                                             extent_map_end(em) - 1,
3675                                             EXTENT_LOCKED | EXTENT_WRITEBACK,
3676                                             0, NULL)) {
3677                                 remove_extent_mapping(map, em);
3678                                 /* once for the rb tree */
3679                                 free_extent_map(em);
3680                         }
3681                         start = extent_map_end(em);
3682                         write_unlock(&map->lock);
3683
3684                         /* once for us */
3685                         free_extent_map(em);
3686                 }
3687         }
3688         return try_release_extent_state(map, tree, page, mask);
3689 }
3690
3691 /*
3692  * helper function for fiemap, which doesn't want to see any holes.
3693  * This maps until we find something past 'last'
3694  */
3695 static struct extent_map *get_extent_skip_holes(struct inode *inode,
3696                                                 u64 offset,
3697                                                 u64 last,
3698                                                 get_extent_t *get_extent)
3699 {
3700         u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
3701         struct extent_map *em;
3702         u64 len;
3703
3704         if (offset >= last)
3705                 return NULL;
3706
3707         while(1) {
3708                 len = last - offset;
3709                 if (len == 0)
3710                         break;
3711                 len = (len + sectorsize - 1) & ~(sectorsize - 1);
3712                 em = get_extent(inode, NULL, 0, offset, len, 0);
3713                 if (IS_ERR_OR_NULL(em))
3714                         return em;
3715
3716                 /* if this isn't a hole return it */
3717                 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
3718                     em->block_start != EXTENT_MAP_HOLE) {
3719                         return em;
3720                 }
3721
3722                 /* this is a hole, advance to the next extent */
3723                 offset = extent_map_end(em);
3724                 free_extent_map(em);
3725                 if (offset >= last)
3726                         break;
3727         }
3728         return NULL;
3729 }
3730
3731 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3732                 __u64 start, __u64 len, get_extent_t *get_extent)
3733 {
3734         int ret = 0;
3735         u64 off = start;
3736         u64 max = start + len;
3737         u32 flags = 0;
3738         u32 found_type;
3739         u64 last;
3740         u64 last_for_get_extent = 0;
3741         u64 disko = 0;
3742         u64 isize = i_size_read(inode);
3743         struct btrfs_key found_key;
3744         struct extent_map *em = NULL;
3745         struct extent_state *cached_state = NULL;
3746         struct btrfs_path *path;
3747         struct btrfs_file_extent_item *item;
3748         int end = 0;
3749         u64 em_start = 0;
3750         u64 em_len = 0;
3751         u64 em_end = 0;
3752         unsigned long emflags;
3753
3754         if (len == 0)
3755                 return -EINVAL;
3756
3757         path = btrfs_alloc_path();
3758         if (!path)
3759                 return -ENOMEM;
3760         path->leave_spinning = 1;
3761
3762         start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3763         len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3764
3765         /*
3766          * lookup the last file extent.  We're not using i_size here
3767          * because there might be preallocation past i_size
3768          */
3769         ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
3770                                        path, btrfs_ino(inode), -1, 0);
3771         if (ret < 0) {
3772                 btrfs_free_path(path);
3773                 return ret;
3774         }
3775         WARN_ON(!ret);
3776         path->slots[0]--;
3777         item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3778                               struct btrfs_file_extent_item);
3779         btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3780         found_type = btrfs_key_type(&found_key);
3781
3782         /* No extents, but there might be delalloc bits */
3783         if (found_key.objectid != btrfs_ino(inode) ||
3784             found_type != BTRFS_EXTENT_DATA_KEY) {
3785                 /* have to trust i_size as the end */
3786                 last = (u64)-1;
3787                 last_for_get_extent = isize;
3788         } else {
3789                 /*
3790                  * remember the start of the last extent.  There are a
3791                  * bunch of different factors that go into the length of the
3792                  * extent, so its much less complex to remember where it started
3793                  */
3794                 last = found_key.offset;
3795                 last_for_get_extent = last + 1;
3796         }
3797         btrfs_free_path(path);
3798
3799         /*
3800          * we might have some extents allocated but more delalloc past those
3801          * extents.  so, we trust isize unless the start of the last extent is
3802          * beyond isize
3803          */
3804         if (last < isize) {
3805                 last = (u64)-1;
3806                 last_for_get_extent = isize;
3807         }
3808
3809         lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3810                          &cached_state);
3811
3812         em = get_extent_skip_holes(inode, start, last_for_get_extent,
3813                                    get_extent);
3814         if (!em)
3815                 goto out;
3816         if (IS_ERR(em)) {
3817                 ret = PTR_ERR(em);
3818                 goto out;
3819         }
3820
3821         while (!end) {
3822                 u64 offset_in_extent;
3823
3824                 /* break if the extent we found is outside the range */
3825                 if (em->start >= max || extent_map_end(em) < off)
3826                         break;
3827
3828                 /*
3829                  * get_extent may return an extent that starts before our
3830                  * requested range.  We have to make sure the ranges
3831                  * we return to fiemap always move forward and don't
3832                  * overlap, so adjust the offsets here
3833                  */
3834                 em_start = max(em->start, off);
3835
3836                 /*
3837                  * record the offset from the start of the extent
3838                  * for adjusting the disk offset below
3839                  */
3840                 offset_in_extent = em_start - em->start;
3841                 em_end = extent_map_end(em);
3842                 em_len = em_end - em_start;
3843                 emflags = em->flags;
3844                 disko = 0;
3845                 flags = 0;
3846
3847                 /*
3848                  * bump off for our next call to get_extent
3849                  */
3850                 off = extent_map_end(em);
3851                 if (off >= max)
3852                         end = 1;
3853
3854                 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
3855                         end = 1;
3856                         flags |= FIEMAP_EXTENT_LAST;
3857                 } else if (em->block_start == EXTENT_MAP_INLINE) {
3858                         flags |= (FIEMAP_EXTENT_DATA_INLINE |
3859                                   FIEMAP_EXTENT_NOT_ALIGNED);
3860                 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
3861                         flags |= (FIEMAP_EXTENT_DELALLOC |
3862                                   FIEMAP_EXTENT_UNKNOWN);
3863                 } else {
3864                         disko = em->block_start + offset_in_extent;
3865                 }
3866                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3867                         flags |= FIEMAP_EXTENT_ENCODED;
3868
3869                 free_extent_map(em);
3870                 em = NULL;
3871                 if ((em_start >= last) || em_len == (u64)-1 ||
3872                    (last == (u64)-1 && isize <= em_end)) {
3873                         flags |= FIEMAP_EXTENT_LAST;
3874                         end = 1;
3875                 }
3876
3877                 /* now scan forward to see if this is really the last extent. */
3878                 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3879                                            get_extent);
3880                 if (IS_ERR(em)) {
3881                         ret = PTR_ERR(em);
3882                         goto out;
3883                 }
3884                 if (!em) {
3885                         flags |= FIEMAP_EXTENT_LAST;
3886                         end = 1;
3887                 }
3888                 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3889                                               em_len, flags);
3890                 if (ret)
3891                         goto out_free;
3892         }
3893 out_free:
3894         free_extent_map(em);
3895 out:
3896         unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
3897                              &cached_state, GFP_NOFS);
3898         return ret;
3899 }
3900
3901 inline struct page *extent_buffer_page(struct extent_buffer *eb,
3902                                               unsigned long i)
3903 {
3904         return eb->pages[i];
3905 }
3906
3907 inline unsigned long num_extent_pages(u64 start, u64 len)
3908 {
3909         return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3910                 (start >> PAGE_CACHE_SHIFT);
3911 }
3912
3913 static void __free_extent_buffer(struct extent_buffer *eb)
3914 {
3915 #if LEAK_DEBUG
3916         unsigned long flags;
3917         spin_lock_irqsave(&leak_lock, flags);
3918         list_del(&eb->leak_list);
3919         spin_unlock_irqrestore(&leak_lock, flags);
3920 #endif
3921         if (eb->pages && eb->pages != eb->inline_pages)
3922                 kfree(eb->pages);
3923         kmem_cache_free(extent_buffer_cache, eb);
3924 }
3925
3926 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3927                                                    u64 start,
3928                                                    unsigned long len,
3929                                                    gfp_t mask)
3930 {
3931         struct extent_buffer *eb = NULL;
3932 #if LEAK_DEBUG
3933         unsigned long flags;
3934 #endif
3935
3936         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
3937         if (eb == NULL)
3938                 return NULL;
3939         eb->start = start;
3940         eb->len = len;
3941         eb->tree = tree;
3942         eb->bflags = 0;
3943         rwlock_init(&eb->lock);
3944         atomic_set(&eb->write_locks, 0);
3945         atomic_set(&eb->read_locks, 0);
3946         atomic_set(&eb->blocking_readers, 0);
3947         atomic_set(&eb->blocking_writers, 0);
3948         atomic_set(&eb->spinning_readers, 0);
3949         atomic_set(&eb->spinning_writers, 0);
3950         eb->lock_nested = 0;
3951         init_waitqueue_head(&eb->write_lock_wq);
3952         init_waitqueue_head(&eb->read_lock_wq);
3953
3954 #if LEAK_DEBUG
3955         spin_lock_irqsave(&leak_lock, flags);
3956         list_add(&eb->leak_list, &buffers);
3957         spin_unlock_irqrestore(&leak_lock, flags);
3958 #endif
3959         spin_lock_init(&eb->refs_lock);
3960         atomic_set(&eb->refs, 1);
3961         atomic_set(&eb->io_pages, 0);
3962
3963         if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
3964                 struct page **pages;
3965                 int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
3966                         PAGE_CACHE_SHIFT;
3967                 pages = kzalloc(num_pages, mask);
3968                 if (!pages) {
3969                         __free_extent_buffer(eb);
3970                         return NULL;
3971                 }
3972                 eb->pages = pages;
3973         } else {
3974                 eb->pages = eb->inline_pages;
3975         }
3976
3977         return eb;
3978 }
3979
3980 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
3981 {
3982         unsigned long i;
3983         struct page *p;
3984         struct extent_buffer *new;
3985         unsigned long num_pages = num_extent_pages(src->start, src->len);
3986
3987         new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
3988         if (new == NULL)
3989                 return NULL;
3990
3991         for (i = 0; i < num_pages; i++) {
3992                 p = alloc_page(GFP_ATOMIC);
3993                 BUG_ON(!p);
3994                 attach_extent_buffer_page(new, p);
3995                 WARN_ON(PageDirty(p));
3996                 SetPageUptodate(p);
3997                 new->pages[i] = p;
3998         }
3999
4000         copy_extent_buffer(new, src, 0, 0, src->len);
4001         set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4002         set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4003
4004         return new;
4005 }
4006
4007 struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
4008 {
4009         struct extent_buffer *eb;
4010         unsigned long num_pages = num_extent_pages(0, len);
4011         unsigned long i;
4012
4013         eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
4014         if (!eb)
4015                 return NULL;
4016
4017         for (i = 0; i < num_pages; i++) {
4018                 eb->pages[i] = alloc_page(GFP_ATOMIC);
4019                 if (!eb->pages[i])
4020                         goto err;
4021         }
4022         set_extent_buffer_uptodate(eb);
4023         btrfs_set_header_nritems(eb, 0);
4024         set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4025
4026         return eb;
4027 err:
4028         for (i--; i > 0; i--)
4029                 __free_page(eb->pages[i]);
4030         __free_extent_buffer(eb);
4031         return NULL;
4032 }
4033
4034 static int extent_buffer_under_io(struct extent_buffer *eb)
4035 {
4036         return (atomic_read(&eb->io_pages) ||
4037                 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4038                 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4039 }
4040
4041 /*
4042  * Helper for releasing extent buffer page.
4043  */
4044 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
4045                                                 unsigned long start_idx)
4046 {
4047         unsigned long index;
4048         unsigned long num_pages;
4049         struct page *page;
4050         int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4051
4052         BUG_ON(extent_buffer_under_io(eb));
4053
4054         num_pages = num_extent_pages(eb->start, eb->len);
4055         index = start_idx + num_pages;
4056         if (start_idx >= index)
4057                 return;
4058
4059         do {
4060                 index--;
4061                 page = extent_buffer_page(eb, index);
4062                 if (page && mapped) {
4063                         spin_lock(&page->mapping->private_lock);
4064                         /*
4065                          * We do this since we'll remove the pages after we've
4066                          * removed the eb from the radix tree, so we could race
4067                          * and have this page now attached to the new eb.  So
4068                          * only clear page_private if it's still connected to
4069                          * this eb.
4070                          */
4071                         if (PagePrivate(page) &&
4072                             page->private == (unsigned long)eb) {
4073                                 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4074                                 BUG_ON(PageDirty(page));
4075                                 BUG_ON(PageWriteback(page));
4076                                 /*
4077                                  * We need to make sure we haven't be attached
4078                                  * to a new eb.
4079                                  */
4080                                 ClearPagePrivate(page);
4081                                 set_page_private(page, 0);
4082                                 /* One for the page private */
4083                                 page_cache_release(page);
4084                         }
4085                         spin_unlock(&page->mapping->private_lock);
4086
4087                 }
4088                 if (page) {
4089                         /* One for when we alloced the page */
4090                         page_cache_release(page);
4091                 }
4092         } while (index != start_idx);
4093 }
4094
4095 /*
4096  * Helper for releasing the extent buffer.
4097  */
4098 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4099 {
4100         btrfs_release_extent_buffer_page(eb, 0);
4101         __free_extent_buffer(eb);
4102 }
4103
4104 static void check_buffer_tree_ref(struct extent_buffer *eb)
4105 {
4106         /* the ref bit is tricky.  We have to make sure it is set
4107          * if we have the buffer dirty.   Otherwise the
4108          * code to free a buffer can end up dropping a dirty
4109          * page
4110          *
4111          * Once the ref bit is set, it won't go away while the
4112          * buffer is dirty or in writeback, and it also won't
4113          * go away while we have the reference count on the
4114          * eb bumped.
4115          *
4116          * We can't just set the ref bit without bumping the
4117          * ref on the eb because free_extent_buffer might
4118          * see the ref bit and try to clear it.  If this happens
4119          * free_extent_buffer might end up dropping our original
4120          * ref by mistake and freeing the page before we are able
4121          * to add one more ref.
4122          *
4123          * So bump the ref count first, then set the bit.  If someone
4124          * beat us to it, drop the ref we added.
4125          */
4126         if (!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4127                 atomic_inc(&eb->refs);
4128                 if (test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4129                         atomic_dec(&eb->refs);
4130         }
4131 }
4132
4133 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
4134 {
4135         unsigned long num_pages, i;
4136
4137         check_buffer_tree_ref(eb);
4138
4139         num_pages = num_extent_pages(eb->start, eb->len);
4140         for (i = 0; i < num_pages; i++) {
4141                 struct page *p = extent_buffer_page(eb, i);
4142                 mark_page_accessed(p);
4143         }
4144 }
4145
4146 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
4147                                           u64 start, unsigned long len)
4148 {
4149         unsigned long num_pages = num_extent_pages(start, len);
4150         unsigned long i;
4151         unsigned long index = start >> PAGE_CACHE_SHIFT;
4152         struct extent_buffer *eb;
4153         struct extent_buffer *exists = NULL;
4154         struct page *p;
4155         struct address_space *mapping = tree->mapping;
4156         int uptodate = 1;
4157         int ret;
4158
4159         rcu_read_lock();
4160         eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4161         if (eb && atomic_inc_not_zero(&eb->refs)) {
4162                 rcu_read_unlock();
4163                 mark_extent_buffer_accessed(eb);
4164                 return eb;
4165         }
4166         rcu_read_unlock();
4167
4168         eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
4169         if (!eb)
4170                 return NULL;
4171
4172         for (i = 0; i < num_pages; i++, index++) {
4173                 p = find_or_create_page(mapping, index, GFP_NOFS);
4174                 if (!p) {
4175                         WARN_ON(1);
4176                         goto free_eb;
4177                 }
4178
4179                 spin_lock(&mapping->private_lock);
4180                 if (PagePrivate(p)) {
4181                         /*
4182                          * We could have already allocated an eb for this page
4183                          * and attached one so lets see if we can get a ref on
4184                          * the existing eb, and if we can we know it's good and
4185                          * we can just return that one, else we know we can just
4186                          * overwrite page->private.
4187                          */
4188                         exists = (struct extent_buffer *)p->private;
4189                         if (atomic_inc_not_zero(&exists->refs)) {
4190                                 spin_unlock(&mapping->private_lock);
4191                                 unlock_page(p);
4192                                 page_cache_release(p);
4193                                 mark_extent_buffer_accessed(exists);
4194                                 goto free_eb;
4195                         }
4196
4197                         /*
4198                          * Do this so attach doesn't complain and we need to
4199                          * drop the ref the old guy had.
4200                          */
4201                         ClearPagePrivate(p);
4202                         WARN_ON(PageDirty(p));
4203                         page_cache_release(p);
4204                 }
4205                 attach_extent_buffer_page(eb, p);
4206                 spin_unlock(&mapping->private_lock);
4207                 WARN_ON(PageDirty(p));
4208                 mark_page_accessed(p);
4209                 eb->pages[i] = p;
4210                 if (!PageUptodate(p))
4211                         uptodate = 0;
4212
4213                 /*
4214                  * see below about how we avoid a nasty race with release page
4215                  * and why we unlock later
4216                  */
4217         }
4218         if (uptodate)
4219                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4220 again:
4221         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4222         if (ret)
4223                 goto free_eb;
4224
4225         spin_lock(&tree->buffer_lock);
4226         ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
4227         if (ret == -EEXIST) {
4228                 exists = radix_tree_lookup(&tree->buffer,
4229                                                 start >> PAGE_CACHE_SHIFT);
4230                 if (!atomic_inc_not_zero(&exists->refs)) {
4231                         spin_unlock(&tree->buffer_lock);
4232                         radix_tree_preload_end();
4233                         exists = NULL;
4234                         goto again;
4235                 }
4236                 spin_unlock(&tree->buffer_lock);
4237                 radix_tree_preload_end();
4238                 mark_extent_buffer_accessed(exists);
4239                 goto free_eb;
4240         }
4241         /* add one reference for the tree */
4242         spin_lock(&eb->refs_lock);
4243         check_buffer_tree_ref(eb);
4244         spin_unlock(&eb->refs_lock);
4245         spin_unlock(&tree->buffer_lock);
4246         radix_tree_preload_end();
4247
4248         /*
4249          * there is a race where release page may have
4250          * tried to find this extent buffer in the radix
4251          * but failed.  It will tell the VM it is safe to
4252          * reclaim the, and it will clear the page private bit.
4253          * We must make sure to set the page private bit properly
4254          * after the extent buffer is in the radix tree so
4255          * it doesn't get lost
4256          */
4257         SetPageChecked(eb->pages[0]);
4258         for (i = 1; i < num_pages; i++) {
4259                 p = extent_buffer_page(eb, i);
4260                 ClearPageChecked(p);
4261                 unlock_page(p);
4262         }
4263         unlock_page(eb->pages[0]);
4264         return eb;
4265
4266 free_eb:
4267         for (i = 0; i < num_pages; i++) {
4268                 if (eb->pages[i])
4269                         unlock_page(eb->pages[i]);
4270         }
4271
4272         WARN_ON(!atomic_dec_and_test(&eb->refs));
4273         btrfs_release_extent_buffer(eb);
4274         return exists;
4275 }
4276
4277 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
4278                                          u64 start, unsigned long len)
4279 {
4280         struct extent_buffer *eb;
4281
4282         rcu_read_lock();
4283         eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4284         if (eb && atomic_inc_not_zero(&eb->refs)) {
4285                 rcu_read_unlock();
4286                 mark_extent_buffer_accessed(eb);
4287                 return eb;
4288         }
4289         rcu_read_unlock();
4290
4291         return NULL;
4292 }
4293
4294 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4295 {
4296         struct extent_buffer *eb =
4297                         container_of(head, struct extent_buffer, rcu_head);
4298
4299         __free_extent_buffer(eb);
4300 }
4301
4302 /* Expects to have eb->eb_lock already held */
4303 static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
4304 {
4305         WARN_ON(atomic_read(&eb->refs) == 0);
4306         if (atomic_dec_and_test(&eb->refs)) {
4307                 if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
4308                         spin_unlock(&eb->refs_lock);
4309                 } else {
4310                         struct extent_io_tree *tree = eb->tree;
4311
4312                         spin_unlock(&eb->refs_lock);
4313
4314                         spin_lock(&tree->buffer_lock);
4315                         radix_tree_delete(&tree->buffer,
4316                                           eb->start >> PAGE_CACHE_SHIFT);
4317                         spin_unlock(&tree->buffer_lock);
4318                 }
4319
4320                 /* Should be safe to release our pages at this point */
4321                 btrfs_release_extent_buffer_page(eb, 0);
4322
4323                 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4324                 return;
4325         }
4326         spin_unlock(&eb->refs_lock);
4327 }
4328
4329 void free_extent_buffer(struct extent_buffer *eb)
4330 {
4331         if (!eb)
4332                 return;
4333
4334         spin_lock(&eb->refs_lock);
4335         if (atomic_read(&eb->refs) == 2 &&
4336             test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
4337                 atomic_dec(&eb->refs);
4338
4339         if (atomic_read(&eb->refs) == 2 &&
4340             test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4341             !extent_buffer_under_io(eb) &&
4342             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4343                 atomic_dec(&eb->refs);
4344
4345         /*
4346          * I know this is terrible, but it's temporary until we stop tracking
4347          * the uptodate bits and such for the extent buffers.
4348          */
4349         release_extent_buffer(eb, GFP_ATOMIC);
4350 }
4351
4352 void free_extent_buffer_stale(struct extent_buffer *eb)
4353 {
4354         if (!eb)
4355                 return;
4356
4357         spin_lock(&eb->refs_lock);
4358         set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4359
4360         if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4361             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4362                 atomic_dec(&eb->refs);
4363         release_extent_buffer(eb, GFP_NOFS);
4364 }
4365
4366 void clear_extent_buffer_dirty(struct extent_buffer *eb)
4367 {
4368         unsigned long i;
4369         unsigned long num_pages;
4370         struct page *page;
4371
4372         num_pages = num_extent_pages(eb->start, eb->len);
4373
4374         for (i = 0; i < num_pages; i++) {
4375                 page = extent_buffer_page(eb, i);
4376                 if (!PageDirty(page))
4377                         continue;
4378
4379                 lock_page(page);
4380                 WARN_ON(!PagePrivate(page));
4381
4382                 clear_page_dirty_for_io(page);
4383                 spin_lock_irq(&page->mapping->tree_lock);
4384                 if (!PageDirty(page)) {
4385                         radix_tree_tag_clear(&page->mapping->page_tree,
4386                                                 page_index(page),
4387                                                 PAGECACHE_TAG_DIRTY);
4388                 }
4389                 spin_unlock_irq(&page->mapping->tree_lock);
4390                 ClearPageError(page);
4391                 unlock_page(page);
4392         }
4393         WARN_ON(atomic_read(&eb->refs) == 0);
4394 }
4395
4396 int set_extent_buffer_dirty(struct extent_buffer *eb)
4397 {
4398         unsigned long i;
4399         unsigned long num_pages;
4400         int was_dirty = 0;
4401
4402         check_buffer_tree_ref(eb);
4403
4404         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4405
4406         num_pages = num_extent_pages(eb->start, eb->len);
4407         WARN_ON(atomic_read(&eb->refs) == 0);
4408         WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4409
4410         for (i = 0; i < num_pages; i++)
4411                 set_page_dirty(extent_buffer_page(eb, i));
4412         return was_dirty;
4413 }
4414
4415 static int range_straddles_pages(u64 start, u64 len)
4416 {
4417         if (len < PAGE_CACHE_SIZE)
4418                 return 1;
4419         if (start & (PAGE_CACHE_SIZE - 1))
4420                 return 1;
4421         if ((start + len) & (PAGE_CACHE_SIZE - 1))
4422                 return 1;
4423         return 0;
4424 }
4425
4426 int clear_extent_buffer_uptodate(struct extent_buffer *eb)
4427 {
4428         unsigned long i;
4429         struct page *page;
4430         unsigned long num_pages;
4431
4432         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4433         num_pages = num_extent_pages(eb->start, eb->len);
4434         for (i = 0; i < num_pages; i++) {
4435                 page = extent_buffer_page(eb, i);
4436                 if (page)
4437                         ClearPageUptodate(page);
4438         }
4439         return 0;
4440 }
4441
4442 int set_extent_buffer_uptodate(struct extent_buffer *eb)
4443 {
4444         unsigned long i;
4445         struct page *page;
4446         unsigned long num_pages;
4447
4448         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4449         num_pages = num_extent_pages(eb->start, eb->len);
4450         for (i = 0; i < num_pages; i++) {
4451                 page = extent_buffer_page(eb, i);
4452                 SetPageUptodate(page);
4453         }
4454         return 0;
4455 }
4456
4457 int extent_range_uptodate(struct extent_io_tree *tree,
4458                           u64 start, u64 end)
4459 {
4460         struct page *page;
4461         int ret;
4462         int pg_uptodate = 1;
4463         int uptodate;
4464         unsigned long index;
4465
4466         if (range_straddles_pages(start, end - start + 1)) {
4467                 ret = test_range_bit(tree, start, end,
4468                                      EXTENT_UPTODATE, 1, NULL);
4469                 if (ret)
4470                         return 1;
4471         }
4472         while (start <= end) {
4473                 index = start >> PAGE_CACHE_SHIFT;
4474                 page = find_get_page(tree->mapping, index);
4475                 if (!page)
4476                         return 1;
4477                 uptodate = PageUptodate(page);
4478                 page_cache_release(page);
4479                 if (!uptodate) {
4480                         pg_uptodate = 0;
4481                         break;
4482                 }
4483                 start += PAGE_CACHE_SIZE;
4484         }
4485         return pg_uptodate;
4486 }
4487
4488 int extent_buffer_uptodate(struct extent_buffer *eb)
4489 {
4490         return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4491 }
4492
4493 int read_extent_buffer_pages(struct extent_io_tree *tree,
4494                              struct extent_buffer *eb, u64 start, int wait,
4495                              get_extent_t *get_extent, int mirror_num)
4496 {
4497         unsigned long i;
4498         unsigned long start_i;
4499         struct page *page;
4500         int err;
4501         int ret = 0;
4502         int locked_pages = 0;
4503         int all_uptodate = 1;
4504         unsigned long num_pages;
4505         unsigned long num_reads = 0;
4506         struct bio *bio = NULL;
4507         unsigned long bio_flags = 0;
4508
4509         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4510                 return 0;
4511
4512         if (start) {
4513                 WARN_ON(start < eb->start);
4514                 start_i = (start >> PAGE_CACHE_SHIFT) -
4515                         (eb->start >> PAGE_CACHE_SHIFT);
4516         } else {
4517                 start_i = 0;
4518         }
4519
4520         num_pages = num_extent_pages(eb->start, eb->len);
4521         for (i = start_i; i < num_pages; i++) {
4522                 page = extent_buffer_page(eb, i);
4523                 if (wait == WAIT_NONE) {
4524                         if (!trylock_page(page))
4525                                 goto unlock_exit;
4526                 } else {
4527                         lock_page(page);
4528                 }
4529                 locked_pages++;
4530                 if (!PageUptodate(page)) {
4531                         num_reads++;
4532                         all_uptodate = 0;
4533                 }
4534         }
4535         if (all_uptodate) {
4536                 if (start_i == 0)
4537                         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4538                 goto unlock_exit;
4539         }
4540
4541         clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
4542         eb->read_mirror = 0;
4543         atomic_set(&eb->io_pages, num_reads);
4544         for (i = start_i; i < num_pages; i++) {
4545                 page = extent_buffer_page(eb, i);
4546                 if (!PageUptodate(page)) {
4547                         ClearPageError(page);
4548                         err = __extent_read_full_page(tree, page,
4549                                                       get_extent, &bio,
4550                                                       mirror_num, &bio_flags);
4551                         if (err)
4552                                 ret = err;
4553                 } else {
4554                         unlock_page(page);
4555                 }
4556         }
4557
4558         if (bio) {
4559                 err = submit_one_bio(READ, bio, mirror_num, bio_flags);
4560                 if (err)
4561                         return err;
4562         }
4563
4564         if (ret || wait != WAIT_COMPLETE)
4565                 return ret;
4566
4567         for (i = start_i; i < num_pages; i++) {
4568                 page = extent_buffer_page(eb, i);
4569                 wait_on_page_locked(page);
4570                 if (!PageUptodate(page))
4571                         ret = -EIO;
4572         }
4573
4574         return ret;
4575
4576 unlock_exit:
4577         i = start_i;
4578         while (locked_pages > 0) {
4579                 page = extent_buffer_page(eb, i);
4580                 i++;
4581                 unlock_page(page);
4582                 locked_pages--;
4583         }
4584         return ret;
4585 }
4586
4587 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4588                         unsigned long start,
4589                         unsigned long len)
4590 {
4591         size_t cur;
4592         size_t offset;
4593         struct page *page;
4594         char *kaddr;
4595         char *dst = (char *)dstv;
4596         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4597         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4598
4599         WARN_ON(start > eb->len);
4600         WARN_ON(start + len > eb->start + eb->len);
4601
4602         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4603
4604         while (len > 0) {
4605                 page = extent_buffer_page(eb, i);
4606
4607                 cur = min(len, (PAGE_CACHE_SIZE - offset));
4608                 kaddr = page_address(page);
4609                 memcpy(dst, kaddr + offset, cur);
4610
4611                 dst += cur;
4612                 len -= cur;
4613                 offset = 0;
4614                 i++;
4615         }
4616 }
4617
4618 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
4619                                unsigned long min_len, char **map,
4620                                unsigned long *map_start,
4621                                unsigned long *map_len)
4622 {
4623         size_t offset = start & (PAGE_CACHE_SIZE - 1);
4624         char *kaddr;
4625         struct page *p;
4626         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4627         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4628         unsigned long end_i = (start_offset + start + min_len - 1) >>
4629                 PAGE_CACHE_SHIFT;
4630
4631         if (i != end_i)
4632                 return -EINVAL;
4633
4634         if (i == 0) {
4635                 offset = start_offset;
4636                 *map_start = 0;
4637         } else {
4638                 offset = 0;
4639                 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4640         }
4641
4642         if (start + min_len > eb->len) {
4643                 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4644                        "wanted %lu %lu\n", (unsigned long long)eb->start,
4645                        eb->len, start, min_len);
4646                 WARN_ON(1);
4647                 return -EINVAL;
4648         }
4649
4650         p = extent_buffer_page(eb, i);
4651         kaddr = page_address(p);
4652         *map = kaddr + offset;
4653         *map_len = PAGE_CACHE_SIZE - offset;
4654         return 0;
4655 }
4656
4657 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4658                           unsigned long start,
4659                           unsigned long len)
4660 {
4661         size_t cur;
4662         size_t offset;
4663         struct page *page;
4664         char *kaddr;
4665         char *ptr = (char *)ptrv;
4666         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4667         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4668         int ret = 0;
4669
4670         WARN_ON(start > eb->len);
4671         WARN_ON(start + len > eb->start + eb->len);
4672
4673         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4674
4675         while (len > 0) {
4676                 page = extent_buffer_page(eb, i);
4677
4678                 cur = min(len, (PAGE_CACHE_SIZE - offset));
4679
4680                 kaddr = page_address(page);
4681                 ret = memcmp(ptr, kaddr + offset, cur);
4682                 if (ret)
4683                         break;
4684
4685                 ptr += cur;
4686                 len -= cur;
4687                 offset = 0;
4688                 i++;
4689         }
4690         return ret;
4691 }
4692
4693 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
4694                          unsigned long start, unsigned long len)
4695 {
4696         size_t cur;
4697         size_t offset;
4698         struct page *page;
4699         char *kaddr;
4700         char *src = (char *)srcv;
4701         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4702         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4703
4704         WARN_ON(start > eb->len);
4705         WARN_ON(start + len > eb->start + eb->len);
4706
4707         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4708
4709         while (len > 0) {
4710                 page = extent_buffer_page(eb, i);
4711                 WARN_ON(!PageUptodate(page));
4712
4713                 cur = min(len, PAGE_CACHE_SIZE - offset);
4714                 kaddr = page_address(page);
4715                 memcpy(kaddr + offset, src, cur);
4716
4717                 src += cur;
4718                 len -= cur;
4719                 offset = 0;
4720                 i++;
4721         }
4722 }
4723
4724 void memset_extent_buffer(struct extent_buffer *eb, char c,
4725                           unsigned long start, unsigned long len)
4726 {
4727         size_t cur;
4728         size_t offset;
4729         struct page *page;
4730         char *kaddr;
4731         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4732         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4733
4734         WARN_ON(start > eb->len);
4735         WARN_ON(start + len > eb->start + eb->len);
4736
4737         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4738
4739         while (len > 0) {
4740                 page = extent_buffer_page(eb, i);
4741                 WARN_ON(!PageUptodate(page));
4742
4743                 cur = min(len, PAGE_CACHE_SIZE - offset);
4744                 kaddr = page_address(page);
4745                 memset(kaddr + offset, c, cur);
4746
4747                 len -= cur;
4748                 offset = 0;
4749                 i++;
4750         }
4751 }
4752
4753 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
4754                         unsigned long dst_offset, unsigned long src_offset,
4755                         unsigned long len)
4756 {
4757         u64 dst_len = dst->len;
4758         size_t cur;
4759         size_t offset;
4760         struct page *page;
4761         char *kaddr;
4762         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4763         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4764
4765         WARN_ON(src->len != dst_len);
4766
4767         offset = (start_offset + dst_offset) &
4768                 ((unsigned long)PAGE_CACHE_SIZE - 1);
4769
4770         while (len > 0) {
4771                 page = extent_buffer_page(dst, i);
4772                 WARN_ON(!PageUptodate(page));
4773
4774                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
4775
4776                 kaddr = page_address(page);
4777                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
4778
4779                 src_offset += cur;
4780                 len -= cur;
4781                 offset = 0;
4782                 i++;
4783         }
4784 }
4785
4786 static void move_pages(struct page *dst_page, struct page *src_page,
4787                        unsigned long dst_off, unsigned long src_off,
4788                        unsigned long len)
4789 {
4790         char *dst_kaddr = page_address(dst_page);
4791         if (dst_page == src_page) {
4792                 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
4793         } else {
4794                 char *src_kaddr = page_address(src_page);
4795                 char *p = dst_kaddr + dst_off + len;
4796                 char *s = src_kaddr + src_off + len;
4797
4798                 while (len--)
4799                         *--p = *--s;
4800         }
4801 }
4802
4803 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4804 {
4805         unsigned long distance = (src > dst) ? src - dst : dst - src;
4806         return distance < len;
4807 }
4808
4809 static void copy_pages(struct page *dst_page, struct page *src_page,
4810                        unsigned long dst_off, unsigned long src_off,
4811                        unsigned long len)
4812 {
4813         char *dst_kaddr = page_address(dst_page);
4814         char *src_kaddr;
4815         int must_memmove = 0;
4816
4817         if (dst_page != src_page) {
4818                 src_kaddr = page_address(src_page);
4819         } else {
4820                 src_kaddr = dst_kaddr;
4821                 if (areas_overlap(src_off, dst_off, len))
4822                         must_memmove = 1;
4823         }
4824
4825         if (must_memmove)
4826                 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
4827         else
4828                 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
4829 }
4830
4831 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4832                            unsigned long src_offset, unsigned long len)
4833 {
4834         size_t cur;
4835         size_t dst_off_in_page;
4836         size_t src_off_in_page;
4837         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4838         unsigned long dst_i;
4839         unsigned long src_i;
4840
4841         if (src_offset + len > dst->len) {
4842                 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4843                        "len %lu dst len %lu\n", src_offset, len, dst->len);
4844                 BUG_ON(1);
4845         }
4846         if (dst_offset + len > dst->len) {
4847                 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4848                        "len %lu dst len %lu\n", dst_offset, len, dst->len);
4849                 BUG_ON(1);
4850         }
4851
4852         while (len > 0) {
4853                 dst_off_in_page = (start_offset + dst_offset) &
4854                         ((unsigned long)PAGE_CACHE_SIZE - 1);
4855                 src_off_in_page = (start_offset + src_offset) &
4856                         ((unsigned long)PAGE_CACHE_SIZE - 1);
4857
4858                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4859                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
4860
4861                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
4862                                                src_off_in_page));
4863                 cur = min_t(unsigned long, cur,
4864                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
4865
4866                 copy_pages(extent_buffer_page(dst, dst_i),
4867                            extent_buffer_page(dst, src_i),
4868                            dst_off_in_page, src_off_in_page, cur);
4869
4870                 src_offset += cur;
4871                 dst_offset += cur;
4872                 len -= cur;
4873         }
4874 }
4875
4876 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4877                            unsigned long src_offset, unsigned long len)
4878 {
4879         size_t cur;
4880         size_t dst_off_in_page;
4881         size_t src_off_in_page;
4882         unsigned long dst_end = dst_offset + len - 1;
4883         unsigned long src_end = src_offset + len - 1;
4884         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4885         unsigned long dst_i;
4886         unsigned long src_i;
4887
4888         if (src_offset + len > dst->len) {
4889                 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4890                        "len %lu len %lu\n", src_offset, len, dst->len);
4891                 BUG_ON(1);
4892         }
4893         if (dst_offset + len > dst->len) {
4894                 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4895                        "len %lu len %lu\n", dst_offset, len, dst->len);
4896                 BUG_ON(1);
4897         }
4898         if (dst_offset < src_offset) {
4899                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4900                 return;
4901         }
4902         while (len > 0) {
4903                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
4904                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
4905
4906                 dst_off_in_page = (start_offset + dst_end) &
4907                         ((unsigned long)PAGE_CACHE_SIZE - 1);
4908                 src_off_in_page = (start_offset + src_end) &
4909                         ((unsigned long)PAGE_CACHE_SIZE - 1);
4910
4911                 cur = min_t(unsigned long, len, src_off_in_page + 1);
4912                 cur = min(cur, dst_off_in_page + 1);
4913                 move_pages(extent_buffer_page(dst, dst_i),
4914                            extent_buffer_page(dst, src_i),
4915                            dst_off_in_page - cur + 1,
4916                            src_off_in_page - cur + 1, cur);
4917
4918                 dst_end -= cur;
4919                 src_end -= cur;
4920                 len -= cur;
4921         }
4922 }
4923
4924 int try_release_extent_buffer(struct page *page, gfp_t mask)
4925 {
4926         struct extent_buffer *eb;
4927
4928         /*
4929          * We need to make sure noboody is attaching this page to an eb right
4930          * now.
4931          */
4932         spin_lock(&page->mapping->private_lock);
4933         if (!PagePrivate(page)) {
4934                 spin_unlock(&page->mapping->private_lock);
4935                 return 1;
4936         }
4937
4938         eb = (struct extent_buffer *)page->private;
4939         BUG_ON(!eb);
4940
4941         /*
4942          * This is a little awful but should be ok, we need to make sure that
4943          * the eb doesn't disappear out from under us while we're looking at
4944          * this page.
4945          */
4946         spin_lock(&eb->refs_lock);
4947         if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4948                 spin_unlock(&eb->refs_lock);
4949                 spin_unlock(&page->mapping->private_lock);
4950                 return 0;
4951         }
4952         spin_unlock(&page->mapping->private_lock);
4953
4954         if ((mask & GFP_NOFS) == GFP_NOFS)
4955                 mask = GFP_NOFS;
4956
4957         /*
4958          * If tree ref isn't set then we know the ref on this eb is a real ref,
4959          * so just return, this page will likely be freed soon anyway.
4960          */
4961         if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4962                 spin_unlock(&eb->refs_lock);
4963                 return 0;
4964         }
4965         release_extent_buffer(eb, mask);
4966
4967         return 1;
4968 }