#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
                                       unsigned long extra_flags,
                                       void (*ctor)(void *, struct kmem_cache *,
                                                    unsigned long));

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 1
#ifdef LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
        u64 start;
        u64 end;
        struct rb_node rb_node;
};

struct extent_page_data {
        struct bio *bio;
        struct extent_io_tree *tree;
        get_extent_t *get_extent;

        /* tells writepage not to lock the state bits for this range;
         * it still does the unlocking
         */
        int extent_locked;
};

int __init extent_io_init(void)
{
        extent_state_cache = btrfs_cache_create("extent_state",
                                            sizeof(struct extent_state), 0,
                                            NULL);
        if (!extent_state_cache)
                return -ENOMEM;

        extent_buffer_cache = btrfs_cache_create("extent_buffers",
                                            sizeof(struct extent_buffer), 0,
                                            NULL);
        if (!extent_buffer_cache)
                goto free_state_cache;
        return 0;

free_state_cache:
        kmem_cache_destroy(extent_state_cache);
        return -ENOMEM;
}

void extent_io_exit(void)
{
        struct extent_state *state;
        struct extent_buffer *eb;

        while (!list_empty(&states)) {
                state = list_entry(states.next, struct extent_state, leak_list);
                printk(KERN_ERR "extent state leak: start %llu end %llu "
                       "state %lu in tree %p refs %d\n",
                       (unsigned long long)state->start,
                       (unsigned long long)state->end,
                       state->state, state->tree, atomic_read(&state->refs));
                list_del(&state->leak_list);
                kmem_cache_free(extent_state_cache, state);
        }

        while (!list_empty(&buffers)) {
                eb = list_entry(buffers.next, struct extent_buffer, leak_list);
                printk(KERN_ERR "extent buffer leak: start %llu len %lu "
                       "refs %d\n", (unsigned long long)eb->start,
                       eb->len, atomic_read(&eb->refs));
                list_del(&eb->leak_list);
                kmem_cache_free(extent_buffer_cache, eb);
        }
        if (extent_state_cache)
                kmem_cache_destroy(extent_state_cache);
        if (extent_buffer_cache)
                kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
                          struct address_space *mapping, gfp_t mask)
{
        tree->state.rb_node = NULL;
        tree->buffer.rb_node = NULL;
        tree->ops = NULL;
        tree->dirty_bytes = 0;
        spin_lock_init(&tree->lock);
        spin_lock_init(&tree->buffer_lock);
        tree->mapping = mapping;
}
EXPORT_SYMBOL(extent_io_tree_init);
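
/*
 * Illustrative sketch (not part of the original file; the example_*
 * helper name is hypothetical): a typical caller embeds an
 * extent_io_tree in its per-inode structure and initializes it against
 * the inode's mapping, as btrfs does with BTRFS_I(inode)->io_tree.
 */
static void __maybe_unused example_init_io_tree(struct inode *inode,
                                                struct extent_io_tree *tree)
{
        /* bind the tree to the inode's page cache; GFP_NOFS matches
         * the allocation context used elsewhere in this file */
        extent_io_tree_init(tree, inode->i_mapping, GFP_NOFS);
}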

struct extent_state *alloc_extent_state(gfp_t mask)
{
        struct extent_state *state;
#ifdef LEAK_DEBUG
        unsigned long flags;
#endif

        state = kmem_cache_alloc(extent_state_cache, mask);
        if (!state)
                return state;
        state->state = 0;
        state->private = 0;
        state->tree = NULL;
#ifdef LEAK_DEBUG
        spin_lock_irqsave(&leak_lock, flags);
        list_add(&state->leak_list, &states);
        spin_unlock_irqrestore(&leak_lock, flags);
#endif
        atomic_set(&state->refs, 1);
        init_waitqueue_head(&state->wq);
        return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
        if (!state)
                return;
        if (atomic_dec_and_test(&state->refs)) {
#ifdef LEAK_DEBUG
                unsigned long flags;
#endif
                WARN_ON(state->tree);
#ifdef LEAK_DEBUG
                spin_lock_irqsave(&leak_lock, flags);
                list_del(&state->leak_list);
                spin_unlock_irqrestore(&leak_lock, flags);
#endif
                kmem_cache_free(extent_state_cache, state);
        }
}
EXPORT_SYMBOL(free_extent_state);
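
/*
 * Illustrative sketch (not part of the original file; the example_*
 * helper name is hypothetical): extent_state is refcounted.
 * alloc_extent_state() returns a state with refs == 1; code that hands
 * the state to a waiter takes an extra reference and drops it with
 * free_extent_state(), which only frees on the last put.
 */
static void __maybe_unused example_state_refcount(void)
{
        struct extent_state *state = alloc_extent_state(GFP_NOFS);

        if (!state)
                return;
        atomic_inc(&state->refs);      /* second user */
        free_extent_state(state);      /* drops refs to 1, still live */
        free_extent_state(state);      /* drops refs to 0, freed */
}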

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct tree_entry *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct tree_entry, rb_node);

                if (offset < entry->start)
                        p = &(*p)->rb_left;
                else if (offset > entry->end)
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        entry = rb_entry(node, struct tree_entry, rb_node);
        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
                                     struct rb_node **prev_ret,
                                     struct rb_node **next_ret)
{
        struct rb_root *root = &tree->state;
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *orig_prev = NULL;
        struct tree_entry *entry;
        struct tree_entry *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct tree_entry, rb_node);
                prev = n;
                prev_entry = entry;

                if (offset < entry->start)
                        n = n->rb_left;
                else if (offset > entry->end)
                        n = n->rb_right;
                else
                        return n;
        }

        if (prev_ret) {
                orig_prev = prev;
                while (prev && offset > prev_entry->end) {
                        prev = rb_next(prev);
                        prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                }
                *prev_ret = prev;
                prev = orig_prev;
        }

        if (next_ret) {
                prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                while (prev && offset < prev_entry->start) {
                        prev = rb_prev(prev);
                        prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                }
                *next_ret = prev;
        }
        return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
                                          u64 offset)
{
        struct rb_node *prev = NULL;
        struct rb_node *ret;

        ret = __etree_search(tree, offset, &prev, NULL);
        if (!ret)
                return prev;
        return ret;
}

static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
                                          u64 offset, struct rb_node *node)
{
        struct rb_root *root = &tree->buffer;
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct extent_buffer *eb;

        while (*p) {
                parent = *p;
                eb = rb_entry(parent, struct extent_buffer, rb_node);

                if (offset < eb->start)
                        p = &(*p)->rb_left;
                else if (offset > eb->start)
                        p = &(*p)->rb_right;
                else
                        return eb;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
                                           u64 offset)
{
        struct rb_root *root = &tree->buffer;
        struct rb_node *n = root->rb_node;
        struct extent_buffer *eb;

        while (n) {
                eb = rb_entry(n, struct extent_buffer, rb_node);
                if (offset < eb->start)
                        n = n->rb_left;
                else if (offset > eb->start)
                        n = n->rb_right;
                else
                        return eb;
        }
        return NULL;
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS (locked or writeback)
 * in their state field are not merged because the end_io handlers need
 * to be able to do operations on them without sleeping (or doing
 * allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_io_tree *tree,
                       struct extent_state *state)
{
        struct extent_state *other;
        struct rb_node *other_node;

        if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
                return 0;

        other_node = rb_prev(&state->rb_node);
        if (other_node) {
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->end == state->start - 1 &&
                    other->state == state->state) {
                        state->start = other->start;
                        other->tree = NULL;
                        rb_erase(&other->rb_node, &tree->state);
                        free_extent_state(other);
                }
        }
        other_node = rb_next(&state->rb_node);
        if (other_node) {
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->start == state->end + 1 &&
                    other->state == state->state) {
                        other->start = state->start;
                        state->tree = NULL;
                        rb_erase(&state->rb_node, &tree->state);
                        free_extent_state(state);
                }
        }
        return 0;
}
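
/*
 * Worked example (illustrative, not from the original file): with
 * [0, 4095] and [4096, 8191] both carrying only EXTENT_DIRTY, the next
 * merge_state() call on either record collapses them into a single
 * [0, 8191] record and frees the absorbed struct.  Ranges holding
 * EXTENT_LOCKED or EXTENT_WRITEBACK (EXTENT_IOBITS) are left alone so
 * end_io handlers never race with a merge.
 */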

static void set_state_cb(struct extent_io_tree *tree,
                         struct extent_state *state,
                         unsigned long bits)
{
        if (tree->ops && tree->ops->set_bit_hook) {
                tree->ops->set_bit_hook(tree->mapping->host, state->start,
                                        state->end, state->state, bits);
        }
}

static void clear_state_cb(struct extent_io_tree *tree,
                           struct extent_state *state,
                           unsigned long bits)
{
        if (tree->ops && tree->ops->clear_bit_hook) {
                tree->ops->clear_bit_hook(tree->mapping->host, state->start,
                                          state->end, state->state, bits);
        }
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
                        struct extent_state *state, u64 start, u64 end,
                        int bits)
{
        struct rb_node *node;

        if (end < start) {
                printk(KERN_ERR "end < start %llu %llu\n",
                       (unsigned long long)end, (unsigned long long)start);
                WARN_ON(1);
        }
        if (bits & EXTENT_DIRTY)
                tree->dirty_bytes += end - start + 1;
        set_state_cb(tree, state, bits);
        state->state |= bits;
        state->start = start;
        state->end = end;
        node = tree_insert(&tree->state, end, &state->rb_node);
        if (node) {
                struct extent_state *found;
                found = rb_entry(node, struct extent_state, rb_node);
                printk(KERN_ERR "found node %llu %llu on insert of "
                       "%llu %llu\n", (unsigned long long)found->start,
                       (unsigned long long)found->end,
                       (unsigned long long)start, (unsigned long long)end);
                free_extent_state(state);
                return -EEXIST;
        }
        state->tree = tree;
        merge_state(tree, state);
        return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].  After
 * calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
                       struct extent_state *prealloc, u64 split)
{
        struct rb_node *node;

        prealloc->start = orig->start;
        prealloc->end = split - 1;
        prealloc->state = orig->state;
        orig->start = split;

        node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
        if (node) {
                struct extent_state *found;
                found = rb_entry(node, struct extent_state, rb_node);
                printk(KERN_ERR "found node %llu %llu on insert of "
                       "%llu %llu\n", (unsigned long long)found->start,
                       (unsigned long long)found->end,
                       (unsigned long long)prealloc->start,
                       (unsigned long long)prealloc->end);
                free_extent_state(prealloc);
                return -EEXIST;
        }
        prealloc->tree = tree;
        return 0;
}
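
/*
 * Worked example (illustrative, not from the original file): splitting
 * orig == [0, 8191] at split == 4096 leaves the tree with
 * prealloc == [0, 4095] and orig == [4096, 8191].  The caller must have
 * allocated 'prealloc' up front because this runs under the tree lock,
 * where sleeping allocations are not allowed.
 */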

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
                            struct extent_state *state, int bits, int wake,
                            int delete)
{
        int ret = state->state & bits;

        if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
                WARN_ON(range > tree->dirty_bytes);
                tree->dirty_bytes -= range;
        }
        clear_state_cb(tree, state, bits);
        state->state &= ~bits;
        if (wake)
                wake_up(&state->wq);
        if (delete || state->state == 0) {
                if (state->tree) {
                        clear_state_cb(tree, state, state->state);
                        rb_erase(&state->rb_node, &tree->state);
                        state->tree = NULL;
                        free_extent_state(state);
                } else {
                        WARN_ON(1);
                }
        } else {
                merge_state(tree, state);
        }
        return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                     int bits, int wake, int delete, gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        unsigned long flags;
        int err;
        int set = 0;

again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                if (!prealloc)
                        return -ENOMEM;
        }

        spin_lock_irqsave(&tree->lock, flags);
        /*
         * this search will find the extents that end after
         * our range starts
         */
        node = tree_search(tree, start);
        if (!node)
                goto out;
        state = rb_entry(node, struct extent_state, rb_node);
        if (state->start > end)
                goto out;
        WARN_ON(state->end < start);

        /*
         *     | ---- desired range ---- |
         *  | state | or
         *  | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip
         * bits on second half.
         *
         * If the extent we found extends past our range, we
         * just split and search again.  It'll get split again
         * the next time though.
         *
         * If the extent we found is inside our range, we clear
         * the desired bit on it.
         */

        if (state->start < start) {
                if (!prealloc)
                        prealloc = alloc_extent_state(GFP_ATOMIC);
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        start = state->end + 1;
                        set |= clear_state_bit(tree, state, bits,
                                        wake, delete);
                } else {
                        start = state->start;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and clear the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                if (!prealloc)
                        prealloc = alloc_extent_state(GFP_ATOMIC);
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);

                if (wake)
                        wake_up(&state->wq);
                set |= clear_state_bit(tree, prealloc, bits,
                                       wake, delete);
                prealloc = NULL;
                goto out;
        }

        start = state->end + 1;
        set |= clear_state_bit(tree, state, bits, wake, delete);
        goto search_again;

out:
        spin_unlock_irqrestore(&tree->lock, flags);
        if (prealloc)
                free_extent_state(prealloc);

        return set;

search_again:
        if (start > end)
                goto out;
        spin_unlock_irqrestore(&tree->lock, flags);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}
EXPORT_SYMBOL(clear_extent_bit);
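
/*
 * Illustrative sketch (not part of the original file; the example_*
 * helper name is hypothetical): clearing dirty accounting for a byte
 * range.  wake == 0 and delete == 0 leave sleepers and any other bits
 * on the range untouched.
 */
static int __maybe_unused example_clear_dirty(struct extent_io_tree *tree,
                                              u64 start, u64 end)
{
        return clear_extent_bit(tree, start, end, EXTENT_DIRTY, 0, 0,
                                GFP_NOFS);
}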

static int wait_on_state(struct extent_io_tree *tree,
                         struct extent_state *state)
{
        DEFINE_WAIT(wait);
        prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
        spin_unlock_irq(&tree->lock);
        schedule();
        spin_lock_irq(&tree->lock);
        finish_wait(&state->wq, &wait);
        return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
        struct extent_state *state;
        struct rb_node *node;

        spin_lock_irq(&tree->lock);
again:
        while (1) {
                /*
                 * this search will find all the extents that end after
                 * our range starts
                 */
                node = tree_search(tree, start);
                if (!node)
                        break;

                state = rb_entry(node, struct extent_state, rb_node);

                if (state->start > end)
                        goto out;

                if (state->state & bits) {
                        start = state->start;
                        atomic_inc(&state->refs);
                        wait_on_state(tree, state);
                        free_extent_state(state);
                        goto again;
                }
                start = state->end + 1;

                if (start > end)
                        break;

                if (need_resched()) {
                        spin_unlock_irq(&tree->lock);
                        cond_resched();
                        spin_lock_irq(&tree->lock);
                }
        }
out:
        spin_unlock_irq(&tree->lock);
        return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

static void set_state_bits(struct extent_io_tree *tree,
                           struct extent_state *state,
                           int bits)
{
        if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
                tree->dirty_bytes += range;
        }
        set_state_cb(tree, state, bits);
        state->state |= bits;
}

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
                   int exclusive, u64 *failed_start, gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        unsigned long flags;
        int err = 0;
        int set;
        u64 last_start;
        u64 last_end;
again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                if (!prealloc)
                        return -ENOMEM;
        }

        spin_lock_irqsave(&tree->lock, flags);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node) {
                err = insert_state(tree, prealloc, start, end, bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
                goto out;
        }

        state = rb_entry(node, struct extent_state, rb_node);
        last_start = state->start;
        last_end = state->end;

        /*
         * | ---- desired range ---- |
         * | state |
         *
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
                set = state->state & bits;
                if (set && exclusive) {
                        *failed_start = state->start;
                        err = -EEXIST;
                        goto out;
                }
                set_state_bits(tree, state, bits);
                start = state->end + 1;
                merge_state(tree, state);
                goto search_again;
        }

        /*
         *     | ---- desired range ---- |
         * | state |
         *   or
         * | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip bits on
         * second half.
         *
         * If the extent we found extends past our
         * range, we just split and search again.  It'll get split
         * again the next time though.
         *
         * If the extent we found is inside our range, we set the
         * desired bit on it.
         */
        if (state->start < start) {
                set = state->state & bits;
                if (exclusive && set) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
                }
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        set_state_bits(tree, state, bits);
                        start = state->end + 1;
                        merge_state(tree, state);
                } else {
                        start = state->start;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *     | state | or               | state |
         *
         * There's a hole, we need to insert something in it and
         * ignore the extent we found.
         */
        if (state->start > start) {
                u64 this_end;
                if (end < last_start)
                        this_end = end;
                else
                        this_end = last_start - 1;
                err = insert_state(tree, prealloc, start, this_end,
                                   bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
                if (err)
                        goto out;
                start = this_end + 1;
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and set the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                set = state->state & bits;
                if (exclusive && set) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
                }
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);

                set_state_bits(tree, prealloc, bits);
                merge_state(tree, prealloc);
                prealloc = NULL;
                goto out;
        }

        goto search_again;

out:
        spin_unlock_irqrestore(&tree->lock, flags);
        if (prealloc)
                free_extent_state(prealloc);

        return err;

search_again:
        if (start > end)
                goto out;
        spin_unlock_irqrestore(&tree->lock, flags);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}
EXPORT_SYMBOL(set_extent_bit);
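
/*
 * Illustrative sketch (not part of the original file; the example_*
 * helper name is hypothetical): exclusive == 1 turns set_extent_bit()
 * into a conditional lock.  On contention it returns -EEXIST and
 * reports where the collision starts, which is how lock_extent() and
 * try_lock_extent() below decide what to wait on or back out of.
 */
static int __maybe_unused example_set_exclusive(struct extent_io_tree *tree,
                                                u64 start, u64 end)
{
        u64 failed_start;
        int err;

        err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
                             &failed_start, GFP_NOFS);
        if (err == -EEXIST)
                return 0;      /* [failed_start, ...] was already locked */
        return 1;
}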

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
                       gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_ordered);

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                    int bits, gfp_t mask)
{
        return set_extent_bit(tree, start, end, bits, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                      int bits, gfp_t mask)
{
        return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end,
                              EXTENT_DELALLOC | EXTENT_DIRTY,
                              0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                       gfp_t mask)
{
        return clear_extent_bit(tree, start, end,
                                EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
                         gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_ordered);

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                       gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                        gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                          gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
                         gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
                              0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
                           gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
        return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * either insert or lock the state struct between start and end.  Use the
 * gfp mask to tell us whether waiting is allowed.
 */
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
        int err;
        u64 failed_start;
        while (1) {
                err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
                                     &failed_start, mask);
                if (err == -EEXIST && (mask & __GFP_WAIT)) {
                        wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
                        start = failed_start;
                } else {
                        break;
                }
                WARN_ON(start > end);
        }
        return err;
}
EXPORT_SYMBOL(lock_extent);

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
                    gfp_t mask)
{
        int err;
        u64 failed_start;

        err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
                             &failed_start, mask);
        if (err == -EEXIST) {
                if (failed_start > start)
                        clear_extent_bit(tree, start, failed_start - 1,
                                         EXTENT_LOCKED, 1, 0, mask);
                return 0;
        }
        return 1;
}
EXPORT_SYMBOL(try_lock_extent);

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
                  gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);
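
/*
 * Illustrative sketch (not part of the original file; the example_*
 * helper name is hypothetical): the usual pattern is a
 * lock_extent()/unlock_extent() pair bracketing work on a byte range,
 * behaving like a sleeping range lock built from EXTENT_LOCKED bits.
 */
static void __maybe_unused example_locked_section(struct extent_io_tree *tree,
                                                  u64 start, u64 end)
{
        lock_extent(tree, start, end, GFP_NOFS);
        /* ... operate on [start, end] while no one else can lock it ... */
        unlock_extent(tree, start, end, GFP_NOFS);
}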

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                BUG_ON(!page);
                __set_page_dirty_nobuffers(page);
                page_cache_release(page);
                index++;
        }
        set_extent_dirty(tree, start, end, GFP_NOFS);
        return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                BUG_ON(!page);
                set_page_writeback(page);
                page_cache_release(page);
                index++;
        }
        set_extent_writeback(tree, start, end, GFP_NOFS);
        return 0;
}
EXPORT_SYMBOL(set_range_writeback);

/*
 * find the first offset in the io tree with 'bits' set. zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned, < 0 on error
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
                          u64 *start_ret, u64 *end_ret, int bits)
{
        struct rb_node *node;
        struct extent_state *state;
        int ret = 1;

        spin_lock_irq(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node)
                goto out;

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->end >= start && (state->state & bits)) {
                        *start_ret = state->start;
                        *end_ret = state->end;
                        ret = 0;
                        break;
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        spin_unlock_irq(&tree->lock);
        return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);
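
/*
 * Illustrative sketch (not part of the original file; the example_*
 * helper name is hypothetical): walking every range with a given bit
 * set by restarting the search just past each hit, until the function
 * returns nonzero.
 */
static void __maybe_unused example_walk_dirty(struct extent_io_tree *tree)
{
        u64 start = 0;
        u64 found_start;
        u64 found_end;

        while (find_first_extent_bit(tree, start, &found_start,
                                     &found_end, EXTENT_DIRTY) == 0) {
                /* [found_start, found_end] has EXTENT_DIRTY set */
                start = found_end + 1;
        }
}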

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
                                                 u64 start, int bits)
{
        struct rb_node *node;
        struct extent_state *state;

        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node)
                goto out;

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->end >= start && (state->state & bits))
                        return state;
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        return NULL;
}
EXPORT_SYMBOL(find_first_extent_bit_state);

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
                                        u64 *start, u64 *end, u64 max_bytes)
{
        struct rb_node *node;
        struct extent_state *state;
        u64 cur_start = *start;
        u64 found = 0;
        u64 total_bytes = 0;

        spin_lock_irq(&tree->lock);

        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, cur_start);
        if (!node) {
                if (!found)
                        *end = (u64)-1;
                goto out;
        }

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (found && (state->start != cur_start ||
                              (state->state & EXTENT_BOUNDARY))) {
                        goto out;
                }
                if (!(state->state & EXTENT_DELALLOC)) {
                        if (!found)
                                *end = state->end;
                        goto out;
                }
                if (!found)
                        *start = state->start;
                found++;
                *end = state->end;
                cur_start = state->end + 1;
                node = rb_next(node);
                if (!node)
                        break;
                total_bytes += state->end - state->start + 1;
                if (total_bytes >= max_bytes)
                        break;
        }
out:
        spin_unlock_irq(&tree->lock);
        return found;
}

static noinline int __unlock_for_delalloc(struct inode *inode,
                                          struct page *locked_page,
                                          u64 start, u64 end)
{
        int ret;
        struct page *pages[16];
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        unsigned long nr_pages = end_index - index + 1;
        int i;

        if (index == locked_page->index && end_index == index)
                return 0;

        while (nr_pages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min(nr_pages, ARRAY_SIZE(pages)), pages);
                for (i = 0; i < ret; i++) {
                        if (pages[i] != locked_page)
                                unlock_page(pages[i]);
                        page_cache_release(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
                cond_resched();
        }
        return 0;
}

static noinline int lock_delalloc_pages(struct inode *inode,
                                        struct page *locked_page,
                                        u64 delalloc_start,
                                        u64 delalloc_end)
{
        unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
        unsigned long start_index = index;
        unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
        unsigned long pages_locked = 0;
        struct page *pages[16];
        unsigned long nrpages;
        int ret;
        int i;

        /* the caller is responsible for locking the start index */
        if (index == locked_page->index && index == end_index)
                return 0;

        /* skip the page at the start index */
        nrpages = end_index - index + 1;
        while (nrpages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min(nrpages, ARRAY_SIZE(pages)), pages);
                if (ret == 0) {
                        ret = -EAGAIN;
                        goto done;
                }
                /* now we have an array of pages, lock them all */
                for (i = 0; i < ret; i++) {
                        /*
                         * the caller is taking responsibility for
                         * locked_page
                         */
                        if (pages[i] != locked_page) {
                                lock_page(pages[i]);
                                if (pages[i]->mapping != inode->i_mapping) {
                                        ret = -EAGAIN;
                                        unlock_page(pages[i]);
                                        page_cache_release(pages[i]);
                                        goto done;
                                }
                        }
                        page_cache_release(pages[i]);
                        pages_locked++;
                }
                nrpages -= ret;
                index += ret;
                cond_resched();
        }
        ret = 0;
done:
        if (ret && pages_locked) {
                __unlock_for_delalloc(inode, locked_page,
                              delalloc_start,
                              ((u64)(start_index + pages_locked - 1)) <<
                              PAGE_CACHE_SHIFT);
        }
        return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_lock_delalloc_range(struct inode *inode,
                                             struct extent_io_tree *tree,
                                             struct page *locked_page,
                                             u64 *start, u64 *end,
                                             u64 max_bytes)
{
        u64 delalloc_start;
        u64 delalloc_end;
        u64 found;
        int ret;
        int loops = 0;

again:
        /* step one, find a bunch of delalloc bytes starting at start */
        delalloc_start = *start;
        delalloc_end = 0;
        found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
                                    max_bytes);
        if (!found || delalloc_end <= *start) {
                *start = delalloc_start;
                *end = delalloc_end;
                return found;
        }

        /*
         * start comes from the offset of locked_page.  We have to lock
         * pages in order, so we can't process delalloc bytes before
         * locked_page
         */
        if (delalloc_start < *start)
                delalloc_start = *start;

        /*
         * make sure to limit the number of pages we try to lock down
         * if we're looping.
         */
        if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
                delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;

        /* step two, lock all the pages after the page that has start */
        ret = lock_delalloc_pages(inode, locked_page,
                                  delalloc_start, delalloc_end);
        if (ret == -EAGAIN) {
                /* some of the pages are gone, let's avoid looping by
                 * shortening the size of the delalloc range we're searching
                 */
                if (!loops) {
                        unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
                        max_bytes = PAGE_CACHE_SIZE - offset;
                        loops = 1;
                        goto again;
                } else {
                        found = 0;
                        goto out_failed;
                }
        }
        BUG_ON(ret);

        /* step three, lock the state bits for the whole range */
        lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);

        /* then test to make sure it is all still delalloc */
        ret = test_range_bit(tree, delalloc_start, delalloc_end,
                             EXTENT_DELALLOC, 1);
        if (!ret) {
                unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
                __unlock_for_delalloc(inode, locked_page,
                              delalloc_start, delalloc_end);
                cond_resched();
                goto again;
        }
        *start = delalloc_start;
        *end = delalloc_end;
out_failed:
        return found;
}
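
/*
 * Caller pattern (illustrative, not from the original file): the
 * writepage path calls find_lock_delalloc_range() with the page it
 * already holds locked; on success the delalloc byte range, all of its
 * pages, and the state bits are locked, and the caller is expected to
 * run the delalloc hook and then unwind with unlock_extent() and
 * __unlock_for_delalloc() (or extent_clear_unlock_delalloc() below).
 */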

int extent_clear_unlock_delalloc(struct inode *inode,
                                struct extent_io_tree *tree,
                                u64 start, u64 end, struct page *locked_page,
                                int unlock_pages,
                                int clear_unlock,
                                int clear_delalloc, int clear_dirty,
                                int set_writeback,
                                int end_writeback)
{
        int ret;
        struct page *pages[16];
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        unsigned long nr_pages = end_index - index + 1;
        int i;
        int clear_bits = 0;

        if (clear_unlock)
                clear_bits |= EXTENT_LOCKED;
        if (clear_dirty)
                clear_bits |= EXTENT_DIRTY;

        if (clear_delalloc)
                clear_bits |= EXTENT_DELALLOC;

        clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
        if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
                return 0;

        while (nr_pages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min(nr_pages, ARRAY_SIZE(pages)), pages);
                for (i = 0; i < ret; i++) {
                        if (pages[i] == locked_page) {
                                page_cache_release(pages[i]);
                                continue;
                        }
                        if (clear_dirty)
                                clear_page_dirty_for_io(pages[i]);
                        if (set_writeback)
                                set_page_writeback(pages[i]);
                        if (end_writeback)
                                end_page_writeback(pages[i]);
                        if (unlock_pages)
                                unlock_page(pages[i]);
                        page_cache_release(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
                cond_resched();
        }
        return 0;
}
EXPORT_SYMBOL(extent_clear_unlock_delalloc);

/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
                     u64 *start, u64 search_end, u64 max_bytes,
                     unsigned long bits)
{
        struct rb_node *node;
        struct extent_state *state;
        u64 cur_start = *start;
        u64 total_bytes = 0;
        int found = 0;

        if (search_end <= cur_start) {
                printk(KERN_ERR "search_end %llu start %llu\n",
                       (unsigned long long)search_end,
                       (unsigned long long)cur_start);
                WARN_ON(1);
                return 0;
        }

        spin_lock_irq(&tree->lock);
        if (cur_start == 0 && bits == EXTENT_DIRTY) {
                total_bytes = tree->dirty_bytes;
                goto out;
        }
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, cur_start);
        if (!node)
                goto out;

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->start > search_end)
                        break;
                if (state->end >= cur_start && (state->state & bits)) {
                        total_bytes += min(search_end, state->end) + 1 -
                                       max(cur_start, state->start);
                        if (total_bytes >= max_bytes)
                                break;
                        if (!found) {
                                *start = state->start;
                                found = 1;
                        }
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        spin_unlock_irq(&tree->lock);
        return total_bytes;
}
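
/*
 * Illustrative sketch (not part of the original file; the example_*
 * helper name is hypothetical): totalling the dirty bytes in a tree.
 * Starting at offset 0 with bits == EXTENT_DIRTY hits the cached
 * tree->dirty_bytes fast path instead of walking the tree.
 */
static u64 __maybe_unused example_count_dirty(struct extent_io_tree *tree)
{
        u64 start = 0;

        return count_range_bits(tree, &start, (u64)-1, (u64)-1,
                                EXTENT_DIRTY);
}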

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;
        int err;

        while (index <= end_index) {
                page = grab_cache_page(tree->mapping, index);
                if (!page) {
                        err = -ENOMEM;
                        goto failed;
                }
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        goto failed;
                }
                index++;
        }
        lock_extent(tree, start, end, GFP_NOFS);
        return 0;

failed:
        /*
         * we failed above in getting the page at 'index', so we undo here
         * up to but not including the page at 'index'
         */
        end_index = index;
        index = start >> PAGE_CACHE_SHIFT;
        while (index < end_index) {
                page = find_get_page(tree->mapping, index);
                unlock_page(page);
                page_cache_release(page);
                index++;
        }
        return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                unlock_page(page);
                page_cache_release(page);
                index++;
        }
        unlock_extent(tree, start, end, GFP_NOFS);
        return 0;
}
EXPORT_SYMBOL(unlock_range);
1491
1492 /*
1493  * set the private field for a given byte offset in the tree.  If there isn't
1494  * an extent_state there already, this does nothing.
1495  */
1496 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1497 {
1498         struct rb_node *node;
1499         struct extent_state *state;
1500         int ret = 0;
1501
1502         spin_lock_irq(&tree->lock);
1503         /*
1504          * this search will find all the extents that end after
1505          * our range starts.
1506          */
1507         node = tree_search(tree, start);
1508         if (!node) {
1509                 ret = -ENOENT;
1510                 goto out;
1511         }
1512         state = rb_entry(node, struct extent_state, rb_node);
1513         if (state->start != start) {
1514                 ret = -ENOENT;
1515                 goto out;
1516         }
1517         state->private = private;
1518 out:
1519         spin_unlock_irq(&tree->lock);
1520         return ret;
1521 }
1522
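/*
 * fetch the private field for a given byte offset in the tree.  Like
 * set_state_private, this returns -ENOENT unless an extent_state
 * record starts at exactly this offset.
 */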
1523 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1524 {
1525         struct rb_node *node;
1526         struct extent_state *state;
1527         int ret = 0;
1528
1529         spin_lock_irq(&tree->lock);
1530         /*
1531          * this search will find all the extents that end after
1532          * our range starts.
1533          */
1534         node = tree_search(tree, start);
1535         if (!node) {
1536                 ret = -ENOENT;
1537                 goto out;
1538         }
1539         state = rb_entry(node, struct extent_state, rb_node);
1540         if (state->start != start) {
1541                 ret = -ENOENT;
1542                 goto out;
1543         }
1544         *private = state->private;
1545 out:
1546         spin_unlock_irq(&tree->lock);
1547         return ret;
1548 }
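
/*
 * Illustrative sketch (not in the original file, compiled out below):
 * a set_state_private/get_state_private round trip.  Both calls fail
 * with -ENOENT unless a state record starts at exactly 'start', so the
 * range is normally locked (and therefore split at 'start') first.
 * The value written here is arbitrary.
 */
#if 0
static int state_private_example(struct extent_io_tree *tree, u64 start)
{
        u64 private;
        int ret;

        ret = set_state_private(tree, start, 0xdead);
        if (ret)
                return ret;     /* no record starts at 'start' */

        ret = get_state_private(tree, start, &private);
        if (!ret)
                WARN_ON(private != 0xdead);
        return ret;
}
#endif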
1549
/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if the whole range is covered by
 * extent records that all have the bits set.  Otherwise, 1 is returned
 * if any extent record in the range has any of the bits set.
 */
1556 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1557                    int bits, int filled)
1558 {
1559         struct extent_state *state = NULL;
1560         struct rb_node *node;
1561         int bitset = 0;
1562         unsigned long flags;
1563
1564         spin_lock_irqsave(&tree->lock, flags);
1565         node = tree_search(tree, start);
1566         while (node && start <= end) {
1567                 state = rb_entry(node, struct extent_state, rb_node);
1568
1569                 if (filled && state->start > start) {
1570                         bitset = 0;
1571                         break;
1572                 }
1573
1574                 if (state->start > end)
1575                         break;
1576
1577                 if (state->state & bits) {
1578                         bitset = 1;
1579                         if (!filled)
1580                                 break;
1581                 } else if (filled) {
1582                         bitset = 0;
1583                         break;
1584                 }
1585                 start = state->end + 1;
1586                 if (start > end)
1587                         break;
1588                 node = rb_next(node);
1589                 if (!node) {
1590                         if (filled)
1591                                 bitset = 0;
1592                         break;
1593                 }
1594         }
1595         spin_unlock_irqrestore(&tree->lock, flags);
1596         return bitset;
1597 }
1598 EXPORT_SYMBOL(test_range_bit);
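
/*
 * Illustrative sketch (not in the original file, compiled out below) of
 * the 'filled' flag: with filled == 1 every byte of the page must be
 * covered by records with the bit set, which is how check_page_uptodate
 * below uses it.
 */
#if 0
static int page_fully_uptodate(struct extent_io_tree *tree,
                               struct page *page)
{
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 end = start + PAGE_CACHE_SIZE - 1;

        /* 1 only if the whole page range is covered and uptodate */
        return test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
}
#endif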
1599
1600 /*
1601  * helper function to set a given page up to date if all the
1602  * extents in the tree for that page are up to date
1603  */
1604 static int check_page_uptodate(struct extent_io_tree *tree,
1605                                struct page *page)
1606 {
1607         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1608         u64 end = start + PAGE_CACHE_SIZE - 1;
1609         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1610                 SetPageUptodate(page);
1611         return 0;
1612 }
1613
1614 /*
1615  * helper function to unlock a page if all the extents in the tree
1616  * for that page are unlocked
1617  */
1618 static int check_page_locked(struct extent_io_tree *tree,
1619                              struct page *page)
1620 {
1621         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1622         u64 end = start + PAGE_CACHE_SIZE - 1;
1623         if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1624                 unlock_page(page);
1625         return 0;
1626 }
1627
1628 /*
1629  * helper function to end page writeback if all the extents
1630  * in the tree for that page are done with writeback
1631  */
1632 static int check_page_writeback(struct extent_io_tree *tree,
1633                              struct page *page)
1634 {
1635         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1636         u64 end = start + PAGE_CACHE_SIZE - 1;
1637         if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1638                 end_page_writeback(page);
1639         return 0;
1640 }
1641
1642 /* lots and lots of room for performance fixes in the end_bio funcs */
1643
1644 /*
1645  * after a writepage IO is done, we need to:
1646  * clear the uptodate bits on error
1647  * clear the writeback bits in the extent tree for this IO
1648  * end_page_writeback if the page has no more pending IO
1649  *
1650  * Scheduling is not allowed, so the extent state tree is expected
1651  * to have one and only one object corresponding to this IO.
1652  */
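/*
 * note: the end_io handlers below walk bio->bi_io_vec from the last
 * vec back to the first; the prefetchw pulls the next (earlier) page's
 * flags into cache while the current page is processed.
 */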
1653 static void end_bio_extent_writepage(struct bio *bio, int err)
1654 {
1655         int uptodate = err == 0;
1656         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1657         struct extent_io_tree *tree;
1658         u64 start;
1659         u64 end;
1660         int whole_page;
1661         int ret;
1662
1663         do {
1664                 struct page *page = bvec->bv_page;
1665                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1666
1667                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1668                          bvec->bv_offset;
1669                 end = start + bvec->bv_len - 1;
1670
1671                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1672                         whole_page = 1;
1673                 else
1674                         whole_page = 0;
1675
1676                 if (--bvec >= bio->bi_io_vec)
1677                         prefetchw(&bvec->bv_page->flags);
1678                 if (tree->ops && tree->ops->writepage_end_io_hook) {
1679                         ret = tree->ops->writepage_end_io_hook(page, start,
1680                                                        end, NULL, uptodate);
1681                         if (ret)
1682                                 uptodate = 0;
1683                 }
1684
1685                 if (!uptodate && tree->ops &&
1686                     tree->ops->writepage_io_failed_hook) {
1687                         ret = tree->ops->writepage_io_failed_hook(bio, page,
1688                                                          start, end, NULL);
1689                         if (ret == 0) {
1690                                 uptodate = (err == 0);
1691                                 continue;
1692                         }
1693                 }
1694
1695                 if (!uptodate) {
1696                         clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1697                         ClearPageUptodate(page);
1698                         SetPageError(page);
1699                 }
1700
1701                 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1702
1703                 if (whole_page)
1704                         end_page_writeback(page);
1705                 else
1706                         check_page_writeback(tree, page);
1707         } while (bvec >= bio->bi_io_vec);
1708
1709         bio_put(bio);
1710 }
1711
1712 /*
1713  * after a readpage IO is done, we need to:
1714  * clear the uptodate bits on error
1715  * set the uptodate bits if things worked
1716  * set the page up to date if all extents in the tree are uptodate
1717  * clear the lock bit in the extent tree
1718  * unlock the page if there are no other extents locked for it
1719  *
1720  * Scheduling is not allowed, so the extent state tree is expected
1721  * to have one and only one object corresponding to this IO.
1722  */
1723 static void end_bio_extent_readpage(struct bio *bio, int err)
1724 {
1725         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1726         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1727         struct extent_io_tree *tree;
1728         u64 start;
1729         u64 end;
1730         int whole_page;
1731         int ret;
1732
1733         do {
1734                 struct page *page = bvec->bv_page;
1735                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1736
1737                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1738                         bvec->bv_offset;
1739                 end = start + bvec->bv_len - 1;
1740
1741                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1742                         whole_page = 1;
1743                 else
1744                         whole_page = 0;
1745
1746                 if (--bvec >= bio->bi_io_vec)
1747                         prefetchw(&bvec->bv_page->flags);
1748
1749                 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1750                         ret = tree->ops->readpage_end_io_hook(page, start, end,
1751                                                               NULL);
1752                         if (ret)
1753                                 uptodate = 0;
1754                 }
1755                 if (!uptodate && tree->ops &&
1756                     tree->ops->readpage_io_failed_hook) {
1757                         ret = tree->ops->readpage_io_failed_hook(bio, page,
1758                                                          start, end, NULL);
1759                         if (ret == 0) {
1760                                 uptodate =
1761                                         test_bit(BIO_UPTODATE, &bio->bi_flags);
1762                                 continue;
1763                         }
1764                 }
1765
1766                 if (uptodate) {
1767                         set_extent_uptodate(tree, start, end,
1768                                             GFP_ATOMIC);
1769                 }
1770                 unlock_extent(tree, start, end, GFP_ATOMIC);
1771
1772                 if (whole_page) {
1773                         if (uptodate) {
1774                                 SetPageUptodate(page);
1775                         } else {
1776                                 ClearPageUptodate(page);
1777                                 SetPageError(page);
1778                         }
1779                         unlock_page(page);
1780                 } else {
1781                         if (uptodate) {
1782                                 check_page_uptodate(tree, page);
1783                         } else {
1784                                 ClearPageUptodate(page);
1785                                 SetPageError(page);
1786                         }
1787                         check_page_locked(tree, page);
1788                 }
1789         } while (bvec >= bio->bi_io_vec);
1790
1791         bio_put(bio);
1792 }
1793
1794 /*
1795  * IO done from prepare_write is pretty simple, we just unlock
1796  * the structs in the extent tree when done, and set the uptodate bits
1797  * as appropriate.
1798  */
1799 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1800 {
1801         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1802         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1803         struct extent_io_tree *tree;
1804         u64 start;
1805         u64 end;
1806
1807         do {
1808                 struct page *page = bvec->bv_page;
1809                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1810
1811                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1812                         bvec->bv_offset;
1813                 end = start + bvec->bv_len - 1;
1814
1815                 if (--bvec >= bio->bi_io_vec)
1816                         prefetchw(&bvec->bv_page->flags);
1817
1818                 if (uptodate) {
1819                         set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1820                 } else {
1821                         ClearPageUptodate(page);
1822                         SetPageError(page);
1823                 }
1824
1825                 unlock_extent(tree, start, end, GFP_ATOMIC);
1826
1827         } while (bvec >= bio->bi_io_vec);
1828
1829         bio_put(bio);
1830 }
1831
1832 static struct bio *
1833 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1834                  gfp_t gfp_flags)
1835 {
1836         struct bio *bio;
1837
1838         bio = bio_alloc(gfp_flags, nr_vecs);
1839
1840         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1841                 while (!bio && (nr_vecs /= 2))
1842                         bio = bio_alloc(gfp_flags, nr_vecs);
1843         }
1844
1845         if (bio) {
1846                 bio->bi_size = 0;
1847                 bio->bi_bdev = bdev;
1848                 bio->bi_sector = first_sector;
1849         }
1850         return bio;
1851 }
1852
1853 static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1854                           unsigned long bio_flags)
1855 {
1856         int ret = 0;
1857         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1858         struct page *page = bvec->bv_page;
1859         struct extent_io_tree *tree = bio->bi_private;
1860         u64 start;
1861         u64 end;
1862
1863         start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1864         end = start + bvec->bv_len - 1;
1865
1866         bio->bi_private = NULL;
1867
1868         bio_get(bio);
1869
1870         if (tree->ops && tree->ops->submit_bio_hook)
1871                 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1872                                            mirror_num, bio_flags);
1873         else
1874                 submit_bio(rw, bio);
1875         if (bio_flagged(bio, BIO_EOPNOTSUPP))
1876                 ret = -EOPNOTSUPP;
1877         bio_put(bio);
1878         return ret;
1879 }
1880
1881 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1882                               struct page *page, sector_t sector,
1883                               size_t size, unsigned long offset,
1884                               struct block_device *bdev,
1885                               struct bio **bio_ret,
1886                               unsigned long max_pages,
1887                               bio_end_io_t end_io_func,
1888                               int mirror_num,
1889                               unsigned long prev_bio_flags,
1890                               unsigned long bio_flags)
1891 {
1892         int ret = 0;
1893         struct bio *bio;
1894         int nr;
1895         int contig = 0;
1896         int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1897         int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
1898         size_t page_size = min(size, PAGE_CACHE_SIZE);
1899
1900         if (bio_ret && *bio_ret) {
1901                 bio = *bio_ret;
1902                 if (old_compressed)
1903                         contig = bio->bi_sector == sector;
1904                 else
1905                         contig = bio->bi_sector + (bio->bi_size >> 9) ==
1906                                 sector;
1907
1908                 if (prev_bio_flags != bio_flags || !contig ||
1909                     (tree->ops && tree->ops->merge_bio_hook &&
1910                      tree->ops->merge_bio_hook(page, offset, page_size, bio,
1911                                                bio_flags)) ||
1912                     bio_add_page(bio, page, page_size, offset) < page_size) {
1913                         ret = submit_one_bio(rw, bio, mirror_num,
1914                                              prev_bio_flags);
1915                         bio = NULL;
1916                 } else {
1917                         return 0;
1918                 }
1919         }
1920         if (this_compressed)
1921                 nr = BIO_MAX_PAGES;
1922         else
1923                 nr = bio_get_nr_vecs(bdev);
1924
1925         bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
        if (!bio) {
                /* don't fall through and dereference a failed allocation */
                printk(KERN_ERR "failed to allocate bio nr %d\n", nr);
                return -ENOMEM;
        }
1929
1930         bio_add_page(bio, page, page_size, offset);
1931         bio->bi_end_io = end_io_func;
1932         bio->bi_private = tree;
1933
1934         if (bio_ret) {
1935                 *bio_ret = bio;
1936         } else {
1937                 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
1938         }
1939
1940         return ret;
1941 }
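
/*
 * note: callers that issue many pages pass a struct bio ** in bio_ret
 * so one bio can keep growing across calls.  The bio is only submitted
 * here when the new page is not contiguous, the bio flags change, the
 * merge_bio_hook rejects it, or bio_add_page refuses the page; the
 * final partially built bio must be submitted by the caller (see
 * extent_read_full_page below).
 */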
1942
1943 void set_page_extent_mapped(struct page *page)
1944 {
1945         if (!PagePrivate(page)) {
1946                 SetPagePrivate(page);
1947                 page_cache_get(page);
1948                 set_page_private(page, EXTENT_PAGE_PRIVATE);
1949         }
1950 }
1951 EXPORT_SYMBOL(set_page_extent_mapped);
1952
1953 void set_page_extent_head(struct page *page, unsigned long len)
1954 {
1955         set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1956 }
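
/*
 * note on the encodings above: the low bits of page->private carry the
 * EXTENT_PAGE_PRIVATE* flag values, so the buffer length stored by
 * set_page_extent_head is shifted left by two to keep clear of them.
 */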
1957
1958 /*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree as the IO is started; they are removed when the IO is
 * done, by the end_io handlers
1962  */
1963 static int __extent_read_full_page(struct extent_io_tree *tree,
1964                                    struct page *page,
1965                                    get_extent_t *get_extent,
1966                                    struct bio **bio, int mirror_num,
1967                                    unsigned long *bio_flags)
1968 {
1969         struct inode *inode = page->mapping->host;
1970         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1971         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1972         u64 end;
1973         u64 cur = start;
1974         u64 extent_offset;
1975         u64 last_byte = i_size_read(inode);
1976         u64 block_start;
1977         u64 cur_end;
1978         sector_t sector;
1979         struct extent_map *em;
1980         struct block_device *bdev;
1981         int ret;
1982         int nr = 0;
1983         size_t page_offset = 0;
1984         size_t iosize;
1985         size_t disk_io_size;
1986         size_t blocksize = inode->i_sb->s_blocksize;
1987         unsigned long this_bio_flag = 0;
1988
1989         set_page_extent_mapped(page);
1990
1991         end = page_end;
1992         lock_extent(tree, start, end, GFP_NOFS);
1993
1994         if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
1995                 char *userpage;
1996                 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
1997
1998                 if (zero_offset) {
1999                         iosize = PAGE_CACHE_SIZE - zero_offset;
2000                         userpage = kmap_atomic(page, KM_USER0);
2001                         memset(userpage + zero_offset, 0, iosize);
2002                         flush_dcache_page(page);
2003                         kunmap_atomic(userpage, KM_USER0);
2004                 }
2005         }
2006         while (cur <= end) {
2007                 if (cur >= last_byte) {
2008                         char *userpage;
2009                         iosize = PAGE_CACHE_SIZE - page_offset;
2010                         userpage = kmap_atomic(page, KM_USER0);
2011                         memset(userpage + page_offset, 0, iosize);
2012                         flush_dcache_page(page);
2013                         kunmap_atomic(userpage, KM_USER0);
2014                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2015                                             GFP_NOFS);
2016                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2017                         break;
2018                 }
2019                 em = get_extent(inode, page, page_offset, cur,
2020                                 end - cur + 1, 0);
2021                 if (IS_ERR(em) || !em) {
2022                         SetPageError(page);
2023                         unlock_extent(tree, cur, end, GFP_NOFS);
2024                         break;
2025                 }
2026                 extent_offset = cur - em->start;
                if (extent_map_end(em) <= cur)
                        printk(KERN_ERR "bad mapping em [%Lu %Lu] cur %Lu\n",
                               em->start, extent_map_end(em), cur);
                BUG_ON(extent_map_end(em) <= cur);
                if (end < cur)
                        printk(KERN_ERR "2bad mapping end %Lu cur %Lu\n",
                               end, cur);
                BUG_ON(end < cur);
2035
2036                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2037                         this_bio_flag = EXTENT_BIO_COMPRESSED;
2038
2039                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2040                 cur_end = min(extent_map_end(em) - 1, end);
2041                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2042                 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2043                         disk_io_size = em->block_len;
2044                         sector = em->block_start >> 9;
2045                 } else {
2046                         sector = (em->block_start + extent_offset) >> 9;
2047                         disk_io_size = iosize;
2048                 }
2049                 bdev = em->bdev;
2050                 block_start = em->block_start;
2051                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2052                         block_start = EXTENT_MAP_HOLE;
2053                 free_extent_map(em);
2054                 em = NULL;
2055
2056                 /* we've found a hole, just zero and go on */
2057                 if (block_start == EXTENT_MAP_HOLE) {
2058                         char *userpage;
2059                         userpage = kmap_atomic(page, KM_USER0);
2060                         memset(userpage + page_offset, 0, iosize);
2061                         flush_dcache_page(page);
2062                         kunmap_atomic(userpage, KM_USER0);
2063
2064                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2065                                             GFP_NOFS);
2066                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2067                         cur = cur + iosize;
2068                         page_offset += iosize;
2069                         continue;
2070                 }
2071                 /* the get_extent function already copied into the page */
2072                 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
2073                         check_page_uptodate(tree, page);
2074                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2075                         cur = cur + iosize;
2076                         page_offset += iosize;
2077                         continue;
2078                 }
2079                 /* we have an inline extent but it didn't get marked up
2080                  * to date.  Error out
2081                  */
2082                 if (block_start == EXTENT_MAP_INLINE) {
2083                         SetPageError(page);
2084                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2085                         cur = cur + iosize;
2086                         page_offset += iosize;
2087                         continue;
2088                 }
2089
2090                 ret = 0;
2091                 if (tree->ops && tree->ops->readpage_io_hook) {
2092                         ret = tree->ops->readpage_io_hook(page, cur,
2093                                                           cur + iosize - 1);
2094                 }
2095                 if (!ret) {
2096                         unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2097                         pnr -= page->index;
2098                         ret = submit_extent_page(READ, tree, page,
2099                                          sector, disk_io_size, page_offset,
2100                                          bdev, bio, pnr,
2101                                          end_bio_extent_readpage, mirror_num,
2102                                          *bio_flags,
2103                                          this_bio_flag);
2104                         nr++;
2105                         *bio_flags = this_bio_flag;
2106                 }
2107                 if (ret)
2108                         SetPageError(page);
2109                 cur = cur + iosize;
2110                 page_offset += iosize;
2111         }
2112         if (!nr) {
2113                 if (!PageError(page))
2114                         SetPageUptodate(page);
2115                 unlock_page(page);
2116         }
2117         return 0;
2118 }
2119
2120 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2121                             get_extent_t *get_extent)
2122 {
2123         struct bio *bio = NULL;
2124         unsigned long bio_flags = 0;
2125         int ret;
2126
2127         ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2128                                       &bio_flags);
2129         if (bio)
2130                 submit_one_bio(READ, bio, 0, bio_flags);
2131         return ret;
2132 }
2133 EXPORT_SYMBOL(extent_read_full_page);
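
/*
 * Illustrative sketch (not in the original file, compiled out below):
 * a filesystem wires extent_read_full_page up as its ->readpage address
 * space operation, roughly the way btrfs does in inode.c, with
 * btrfs_get_extent as the get_extent_t callback.
 */
#if 0
static int example_readpage(struct file *file, struct page *page)
{
        struct extent_io_tree *tree;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_read_full_page(tree, page, btrfs_get_extent);
}
#endif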
2134
2135 /*
2136  * the writepage semantics are similar to regular writepage.  extent
2137  * records are inserted to lock ranges in the tree, and as dirty areas
2138  * are found, they are marked writeback.  Then the lock bits are removed
2139  * and the end_io handler clears the writeback ranges
2140  */
2141 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2142                               void *data)
2143 {
2144         struct inode *inode = page->mapping->host;
2145         struct extent_page_data *epd = data;
2146         struct extent_io_tree *tree = epd->tree;
2147         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2148         u64 delalloc_start;
2149         u64 page_end = start + PAGE_CACHE_SIZE - 1;
2150         u64 end;
2151         u64 cur = start;
2152         u64 extent_offset;
2153         u64 last_byte = i_size_read(inode);
2154         u64 block_start;
2155         u64 iosize;
2156         u64 unlock_start;
2157         sector_t sector;
2158         struct extent_map *em;
2159         struct block_device *bdev;
2160         int ret;
2161         int nr = 0;
2162         size_t pg_offset = 0;
2163         size_t blocksize;
2164         loff_t i_size = i_size_read(inode);
2165         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2166         u64 nr_delalloc;
2167         u64 delalloc_end;
2168         int page_started;
2169         int compressed;
2170         unsigned long nr_written = 0;
2171
2172         WARN_ON(!PageLocked(page));
2173         pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2174         if (page->index > end_index ||
2175            (page->index == end_index && !pg_offset)) {
2176                 if (epd->extent_locked) {
2177                         if (tree->ops && tree->ops->writepage_end_io_hook)
2178                                 tree->ops->writepage_end_io_hook(page, start,
2179                                                          page_end, NULL, 1);
2180                 }
2181                 unlock_page(page);
2182                 return 0;
2183         }
2184
2185         if (page->index == end_index) {
2186                 char *userpage;
2187
2188                 userpage = kmap_atomic(page, KM_USER0);
2189                 memset(userpage + pg_offset, 0,
2190                        PAGE_CACHE_SIZE - pg_offset);
2191                 kunmap_atomic(userpage, KM_USER0);
2192                 flush_dcache_page(page);
2193         }
2194         pg_offset = 0;
2195
2196         set_page_extent_mapped(page);
2197
2198         delalloc_start = start;
2199         delalloc_end = 0;
2200         page_started = 0;
2201         if (!epd->extent_locked) {
                while (delalloc_end < page_end) {
2203                         nr_delalloc = find_lock_delalloc_range(inode, tree,
2204                                                        page,
2205                                                        &delalloc_start,
2206                                                        &delalloc_end,
2207                                                        128 * 1024 * 1024);
2208                         if (nr_delalloc == 0) {
2209                                 delalloc_start = delalloc_end + 1;
2210                                 continue;
2211                         }
2212                         tree->ops->fill_delalloc(inode, page, delalloc_start,
2213                                                  delalloc_end, &page_started,
2214                                                  &nr_written);
2215                         delalloc_start = delalloc_end + 1;
2216                 }
2217
2218                 /* did the fill delalloc function already unlock and start
2219                  * the IO?
2220                  */
2221                 if (page_started) {
2222                         ret = 0;
2223                         goto update_nr_written;
2224                 }
2225         }
2226         lock_extent(tree, start, page_end, GFP_NOFS);
2227
2228         unlock_start = start;
2229
2230         if (tree->ops && tree->ops->writepage_start_hook) {
2231                 ret = tree->ops->writepage_start_hook(page, start,
2232                                                       page_end);
2233                 if (ret == -EAGAIN) {
2234                         unlock_extent(tree, start, page_end, GFP_NOFS);
2235                         redirty_page_for_writepage(wbc, page);
2236                         unlock_page(page);
2237                         ret = 0;
2238                         goto update_nr_written;
2239                 }
2240         }
2241
2242         nr_written++;
2243
2244         end = page_end;
2245         if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
                printk(KERN_ERR "found delalloc bits after lock_extent\n");
2247         }
2248
2249         if (last_byte <= start) {
2250                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
2251                 unlock_extent(tree, start, page_end, GFP_NOFS);
2252                 if (tree->ops && tree->ops->writepage_end_io_hook)
2253                         tree->ops->writepage_end_io_hook(page, start,
2254                                                          page_end, NULL, 1);
2255                 unlock_start = page_end + 1;
2256                 goto done;
2257         }
2258
2259         set_extent_uptodate(tree, start, page_end, GFP_NOFS);
2260         blocksize = inode->i_sb->s_blocksize;
2261
2262         while (cur <= end) {
2263                 if (cur >= last_byte) {
2264                         clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
2265                         unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2266                         if (tree->ops && tree->ops->writepage_end_io_hook)
2267                                 tree->ops->writepage_end_io_hook(page, cur,
2268                                                          page_end, NULL, 1);
2269                         unlock_start = page_end + 1;
2270                         break;
2271                 }
2272                 em = epd->get_extent(inode, page, pg_offset, cur,
2273                                      end - cur + 1, 1);
2274                 if (IS_ERR(em) || !em) {
2275                         SetPageError(page);
2276                         break;
2277                 }
2278
2279                 extent_offset = cur - em->start;
2280                 BUG_ON(extent_map_end(em) <= cur);
2281                 BUG_ON(end < cur);
2282                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2283                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2284                 sector = (em->block_start + extent_offset) >> 9;
2285                 bdev = em->bdev;
2286                 block_start = em->block_start;
2287                 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2288                 free_extent_map(em);
2289                 em = NULL;
2290
2291                 /*
2292                  * compressed and inline extents are written through other
2293                  * paths in the FS
2294                  */
2295                 if (compressed || block_start == EXTENT_MAP_HOLE ||
2296                     block_start == EXTENT_MAP_INLINE) {
2297                         clear_extent_dirty(tree, cur,
2298                                            cur + iosize - 1, GFP_NOFS);
2299
2300                         unlock_extent(tree, unlock_start, cur + iosize -1,
2301                                       GFP_NOFS);
2302
2303                         /*
2304                          * end_io notification does not happen here for
2305                          * compressed extents
2306                          */
2307                         if (!compressed && tree->ops &&
2308                             tree->ops->writepage_end_io_hook)
2309                                 tree->ops->writepage_end_io_hook(page, cur,
2310                                                          cur + iosize - 1,
2311                                                          NULL, 1);
2312                         else if (compressed) {
2313                                 /* we don't want to end_page_writeback on
2314                                  * a compressed extent.  this happens
2315                                  * elsewhere
2316                                  */
2317                                 nr++;
2318                         }
2319
2320                         cur += iosize;
2321                         pg_offset += iosize;
2322                         unlock_start = cur;
2323                         continue;
2324                 }
2325                 /* leave this out until we have a page_mkwrite call */
2326                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2327                                    EXTENT_DIRTY, 0)) {
2328                         cur = cur + iosize;
2329                         pg_offset += iosize;
2330                         continue;
2331                 }
2332
2333                 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2334                 if (tree->ops && tree->ops->writepage_io_hook) {
2335                         ret = tree->ops->writepage_io_hook(page, cur,
2336                                                 cur + iosize - 1);
2337                 } else {
2338                         ret = 0;
2339                 }
2340                 if (ret) {
2341                         SetPageError(page);
2342                 } else {
2343                         unsigned long max_nr = end_index + 1;
2344
2345                         set_range_writeback(tree, cur, cur + iosize - 1);
2346                         if (!PageWriteback(page)) {
                                printk(KERN_WARNING "page %lu not writeback, "
2348                                        "cur %llu end %llu\n", page->index,
2349                                        (unsigned long long)cur,
2350                                        (unsigned long long)end);
2351                         }
2352
2353                         ret = submit_extent_page(WRITE, tree, page, sector,
2354                                                  iosize, pg_offset, bdev,
2355                                                  &epd->bio, max_nr,
2356                                                  end_bio_extent_writepage,
2357                                                  0, 0, 0);
2358                         if (ret)
2359                                 SetPageError(page);
2360                 }
2361                 cur = cur + iosize;
2362                 pg_offset += iosize;
2363                 nr++;
2364         }
2365 done:
2366         if (nr == 0) {
2367                 /* make sure the mapping tag for page dirty gets cleared */
2368                 set_page_writeback(page);
2369                 end_page_writeback(page);
2370         }
2371         if (unlock_start <= page_end)
2372                 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2373         unlock_page(page);
2374
2375 update_nr_written:
2376         wbc->nr_to_write -= nr_written;
2377         if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2378             wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2379                 page->mapping->writeback_index = page->index + nr_written;
2380         return 0;
2381 }
2382
2383 /**
2384  * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2385  * @mapping: address space structure to write
2386  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2387  * @writepage: function called for each page
2388  * @data: data passed to writepage function
2389  *
2390  * If a page is already under I/O, write_cache_pages() skips it, even
2391  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2392  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2393  * and msync() need to guarantee that all the data which was dirty at the time
2394  * the call was made get new I/O started against them.  If wbc->sync_mode is
2395  * WB_SYNC_ALL then we were called for data integrity and we must wait for
2396  * existing IO to complete.
2397  */
2398 int extent_write_cache_pages(struct extent_io_tree *tree,
2399                              struct address_space *mapping,
2400                              struct writeback_control *wbc,
2401                              writepage_t writepage, void *data)
2402 {
2403         struct backing_dev_info *bdi = mapping->backing_dev_info;
2404         int ret = 0;
2405         int done = 0;
2406         struct pagevec pvec;
2407         int nr_pages;
2408         pgoff_t index;
2409         pgoff_t end;            /* Inclusive */
2410         int scanned = 0;
2411         int range_whole = 0;
2412
2413         if (wbc->nonblocking && bdi_write_congested(bdi)) {
2414                 wbc->encountered_congestion = 1;
2415                 return 0;
2416         }
2417
2418         pagevec_init(&pvec, 0);
2419         if (wbc->range_cyclic) {
2420                 index = mapping->writeback_index; /* Start from prev offset */
2421                 end = -1;
2422         } else {
2423                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2424                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2425                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2426                         range_whole = 1;
2427                 scanned = 1;
2428         }
2429 retry:
2430         while (!done && (index <= end) &&
2431                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2432                                               PAGECACHE_TAG_DIRTY,
                                              min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
2434                 unsigned i;
2435
2436                 scanned = 1;
2437                 for (i = 0; i < nr_pages; i++) {
2438                         struct page *page = pvec.pages[i];
2439
2440                         /*
2441                          * At this point we hold neither mapping->tree_lock nor
2442                          * lock on the page itself: the page may be truncated or
2443                          * invalidated (changing page->mapping to NULL), or even
2444                          * swizzled back from swapper_space to tmpfs file
2445                          * mapping
2446                          */
2447                         if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2448                                 tree->ops->write_cache_pages_lock_hook(page);
2449                         else
2450                                 lock_page(page);
2451
2452                         if (unlikely(page->mapping != mapping)) {
2453                                 unlock_page(page);
2454                                 continue;
2455                         }
2456
2457                         if (!wbc->range_cyclic && page->index > end) {
2458                                 done = 1;
2459                                 unlock_page(page);
2460                                 continue;
2461                         }
2462
2463                         if (wbc->sync_mode != WB_SYNC_NONE)
2464                                 wait_on_page_writeback(page);
2465
2466                         if (PageWriteback(page) ||
2467                             !clear_page_dirty_for_io(page)) {
2468                                 unlock_page(page);
2469                                 continue;
2470                         }
2471
2472                         ret = (*writepage)(page, wbc, data);
2473
2474                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2475                                 unlock_page(page);
2476                                 ret = 0;
2477                         }
2478                         if (ret || wbc->nr_to_write <= 0)
2479                                 done = 1;
2480                         if (wbc->nonblocking && bdi_write_congested(bdi)) {
2481                                 wbc->encountered_congestion = 1;
2482                                 done = 1;
2483                         }
2484                 }
2485                 pagevec_release(&pvec);
2486                 cond_resched();
2487         }
2488         if (!scanned && !done) {
2489                 /*
2490                  * We hit the last page and there is more work to be done: wrap
2491                  * back to the start of the file
2492                  */
2493                 scanned = 1;
2494                 index = 0;
2495                 goto retry;
2496         }
2497         return ret;
2498 }
2499 EXPORT_SYMBOL(extent_write_cache_pages);
2500
2501 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2502                           get_extent_t *get_extent,
2503                           struct writeback_control *wbc)
2504 {
2505         int ret;
2506         struct address_space *mapping = page->mapping;
2507         struct extent_page_data epd = {
2508                 .bio = NULL,
2509                 .tree = tree,
2510                 .get_extent = get_extent,
2511                 .extent_locked = 0,
2512         };
2513         struct writeback_control wbc_writepages = {
2514                 .bdi            = wbc->bdi,
2515                 .sync_mode      = WB_SYNC_NONE,
2516                 .older_than_this = NULL,
2517                 .nr_to_write    = 64,
2518                 .range_start    = page_offset(page) + PAGE_CACHE_SIZE,
2519                 .range_end      = (loff_t)-1,
2520         };
2521
2522
2523         ret = __extent_writepage(page, wbc, &epd);
2524
2525         extent_write_cache_pages(tree, mapping, &wbc_writepages,
2526                                  __extent_writepage, &epd);
2527         if (epd.bio) {
2528                 submit_one_bio(WRITE, epd.bio, 0, 0);
2529         }
2530         return ret;
2531 }
2532 EXPORT_SYMBOL(extent_write_full_page);
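
/*
 * note: wbc_writepages above starts one page past the target page and
 * allows up to 64 more pages, so a single writepage call also
 * opportunistically pushes out nearby dirty pages and produces larger
 * IOs.
 */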
2533
2534 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2535                               u64 start, u64 end, get_extent_t *get_extent,
2536                               int mode)
2537 {
2538         int ret = 0;
2539         struct address_space *mapping = inode->i_mapping;
2540         struct page *page;
2541         unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
2542                 PAGE_CACHE_SHIFT;
2543
2544         struct extent_page_data epd = {
2545                 .bio = NULL,
2546                 .tree = tree,
2547                 .get_extent = get_extent,
2548                 .extent_locked = 1,
2549         };
2550         struct writeback_control wbc_writepages = {
2551                 .bdi            = inode->i_mapping->backing_dev_info,
2552                 .sync_mode      = mode,
2553                 .older_than_this = NULL,
2554                 .nr_to_write    = nr_pages * 2,
2555                 .range_start    = start,
2556                 .range_end      = end + 1,
2557         };
2558
        while (start <= end) {
2560                 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
2561                 if (clear_page_dirty_for_io(page))
2562                         ret = __extent_writepage(page, &wbc_writepages, &epd);
2563                 else {
2564                         if (tree->ops && tree->ops->writepage_end_io_hook)
2565                                 tree->ops->writepage_end_io_hook(page, start,
2566                                                  start + PAGE_CACHE_SIZE - 1,
2567                                                  NULL, 1);
2568                         unlock_page(page);
2569                 }
2570                 page_cache_release(page);
2571                 start += PAGE_CACHE_SIZE;
2572         }
2573
2574         if (epd.bio)
2575                 submit_one_bio(WRITE, epd.bio, 0, 0);
2576         return ret;
2577 }
2578 EXPORT_SYMBOL(extent_write_locked_range);
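
/*
 * note: extent_locked == 1 in epd above means the caller is responsible
 * for the extent state bits on this range, so __extent_writepage skips
 * its delalloc fill pass for these pages.
 */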
2579
2580
2581 int extent_writepages(struct extent_io_tree *tree,
2582                       struct address_space *mapping,
2583                       get_extent_t *get_extent,
2584                       struct writeback_control *wbc)
2585 {
2586         int ret = 0;
2587         struct extent_page_data epd = {
2588                 .bio = NULL,
2589                 .tree = tree,
2590                 .get_extent = get_extent,
2591                 .extent_locked = 0,
2592         };
2593
2594         ret = extent_write_cache_pages(tree, mapping, wbc,
2595                                        __extent_writepage, &epd);
2596         if (epd.bio) {
2597                 submit_one_bio(WRITE, epd.bio, 0, 0);
2598         }
2599         return ret;
2600 }
2601 EXPORT_SYMBOL(extent_writepages);
2602
2603 int extent_readpages(struct extent_io_tree *tree,
2604                      struct address_space *mapping,
2605                      struct list_head *pages, unsigned nr_pages,
2606                      get_extent_t get_extent)
2607 {
2608         struct bio *bio = NULL;
2609         unsigned page_idx;
2610         struct pagevec pvec;
2611         unsigned long bio_flags = 0;
2612
2613         pagevec_init(&pvec, 0);
2614         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2615                 struct page *page = list_entry(pages->prev, struct page, lru);
2616
2617                 prefetchw(&page->flags);
2618                 list_del(&page->lru);
2619                 /*
2620                  * what we want to do here is call add_to_page_cache_lru,
2621                  * but that isn't exported, so we reproduce it here
2622                  */
2623                 if (!add_to_page_cache(page, mapping,
2624                                         page->index, GFP_KERNEL)) {
2625
2626                         /* open coding of lru_cache_add, also not exported */
2627                         page_cache_get(page);
2628                         if (!pagevec_add(&pvec, page))
2629                                 __pagevec_lru_add(&pvec);
2630                         __extent_read_full_page(tree, page, get_extent,
2631                                                 &bio, 0, &bio_flags);
2632                 }
2633                 page_cache_release(page);
2634         }
2635         if (pagevec_count(&pvec))
2636                 __pagevec_lru_add(&pvec);
2637         BUG_ON(!list_empty(pages));
2638         if (bio)
2639                 submit_one_bio(READ, bio, 0, bio_flags);
2640         return 0;
2641 }
2642 EXPORT_SYMBOL(extent_readpages);
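
/*
 * Illustrative sketch (not in the original file, compiled out below):
 * the matching ->readpages wiring, again roughly as btrfs does it in
 * inode.c with btrfs_get_extent.
 */
#if 0
static int example_readpages(struct file *file,
                             struct address_space *mapping,
                             struct list_head *pages, unsigned nr_pages)
{
        struct extent_io_tree *tree;

        tree = &BTRFS_I(mapping->host)->io_tree;
        return extent_readpages(tree, mapping, pages, nr_pages,
                                btrfs_get_extent);
}
#endif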
2643
2644 /*
2645  * basic invalidatepage code, this waits on any locked or writeback
2646  * ranges corresponding to the page, and then deletes any extent state
2647  * records from the tree
2648  */
2649 int extent_invalidatepage(struct extent_io_tree *tree,
2650                           struct page *page, unsigned long offset)
2651 {
2652         u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2653         u64 end = start + PAGE_CACHE_SIZE - 1;
2654         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2655
        start += (offset + blocksize - 1) & ~(blocksize - 1);
2657         if (start > end)
2658                 return 0;
2659
2660         lock_extent(tree, start, end, GFP_NOFS);
2661         wait_on_extent_writeback(tree, start, end);
2662         clear_extent_bit(tree, start, end,
2663                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2664                          1, 1, GFP_NOFS);
2665         return 0;
2666 }
2667 EXPORT_SYMBOL(extent_invalidatepage);
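
/*
 * e.g. with a 4k block size, invalidating from offset 1 rounds start up
 * by (1 + 4095) & ~4095 == 4096, so only extent state for whole blocks
 * past the offset is dropped.
 */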
2668
/*
 * simple commit_write call; the page is marked dirty with set_page_dirty
 * and the inode size is updated when the write extends past i_size
 */
2673 int extent_commit_write(struct extent_io_tree *tree,
2674                         struct inode *inode, struct page *page,
2675                         unsigned from, unsigned to)
2676 {
2677         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2678
2679         set_page_extent_mapped(page);
2680         set_page_dirty(page);
2681
2682         if (pos > inode->i_size) {
2683                 i_size_write(inode, pos);
2684                 mark_inode_dirty(inode);
2685         }
2686         return 0;
2687 }
2688 EXPORT_SYMBOL(extent_commit_write);
2689
2690 int extent_prepare_write(struct extent_io_tree *tree,
2691                          struct inode *inode, struct page *page,
2692                          unsigned from, unsigned to, get_extent_t *get_extent)
2693 {
2694         u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2695         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2696         u64 block_start;
2697         u64 orig_block_start;
2698         u64 block_end;
2699         u64 cur_end;
2700         struct extent_map *em;
2701         unsigned blocksize = 1 << inode->i_blkbits;
2702         size_t page_offset = 0;
2703         size_t block_off_start;
2704         size_t block_off_end;
2705         int err = 0;
2706         int iocount = 0;
2707         int ret = 0;
2708         int isnew;
2709
2710         set_page_extent_mapped(page);
2711
2712         block_start = (page_start + from) & ~((u64)blocksize - 1);
2713         block_end = (page_start + to - 1) | (blocksize - 1);
2714         orig_block_start = block_start;
2715
2716         lock_extent(tree, page_start, page_end, GFP_NOFS);
        while (block_start <= block_end) {
2718                 em = get_extent(inode, page, page_offset, block_start,
2719                                 block_end - block_start + 1, 1);
                if (IS_ERR(em) || !em) {
                        /* don't return 0 when the extent lookup fails */
                        err = IS_ERR(em) ? PTR_ERR(em) : -EIO;
                        goto err;
                }
2723                 cur_end = min(block_end, extent_map_end(em) - 1);
2724                 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2725                 block_off_end = block_off_start + blocksize;
2726                 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2727
2728                 if (!PageUptodate(page) && isnew &&
2729                     (block_off_end > to || block_off_start < from)) {
2730                         void *kaddr;
2731
2732                         kaddr = kmap_atomic(page, KM_USER0);
2733                         if (block_off_end > to)
2734                                 memset(kaddr + to, 0, block_off_end - to);
2735                         if (block_off_start < from)
2736                                 memset(kaddr + block_off_start, 0,
2737                                        from - block_off_start);
2738                         flush_dcache_page(page);
2739                         kunmap_atomic(kaddr, KM_USER0);
2740                 }
2741                 if ((em->block_start != EXTENT_MAP_HOLE &&
2742                      em->block_start != EXTENT_MAP_INLINE) &&
2743                     !isnew && !PageUptodate(page) &&
2744                     (block_off_end > to || block_off_start < from) &&
2745                     !test_range_bit(tree, block_start, cur_end,
2746                                     EXTENT_UPTODATE, 1)) {
2747                         u64 sector;
2748                         u64 extent_offset = block_start - em->start;
2749                         size_t iosize;
2750                         sector = (em->block_start + extent_offset) >> 9;
2751                         iosize = (cur_end - block_start + blocksize) &
2752                                 ~((u64)blocksize - 1);
2753                         /*
2754                          * we've already got the extent locked, but we
2755                          * need to split the state such that our end_bio
2756                          * handler can clear the lock.
2757                          */
2758                         set_extent_bit(tree, block_start,
2759                                        block_start + iosize - 1,
2760                                        EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2761                         ret = submit_extent_page(READ, tree, page,
2762                                          sector, iosize, page_offset, em->bdev,
2763                                          NULL, 1,
2764                                          end_bio_extent_preparewrite, 0,
2765                                          0, 0);
2766                         iocount++;
2767                         block_start = block_start + iosize;
2768                 } else {
2769                         set_extent_uptodate(tree, block_start, cur_end,
2770                                             GFP_NOFS);
2771                         unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2772                         block_start = cur_end + 1;
2773                 }
2774                 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2775                 free_extent_map(em);
2776         }
2777         if (iocount) {
2778                 wait_extent_bit(tree, orig_block_start,
2779                                 block_end, EXTENT_LOCKED);
2780         }
2781         check_page_uptodate(tree, page);
2782 err:
2783         /* FIXME, zero out newly allocated blocks on error */
2784         return err;
2785 }
2786 EXPORT_SYMBOL(extent_prepare_write);
2787
2788 /*
2789  * a helper for releasepage, this tests for areas of the page that
2790  * are locked or under IO and drops the related state bits if it is safe
2791  * to drop the page.
2792  */
2793 int try_release_extent_state(struct extent_map_tree *map,
2794                              struct extent_io_tree *tree, struct page *page,
2795                              gfp_t mask)
2796 {
2797         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2798         u64 end = start + PAGE_CACHE_SIZE - 1;
2799         int ret = 1;
2800
2801         if (test_range_bit(tree, start, end,
2802                            EXTENT_IOBITS | EXTENT_ORDERED, 0))
2803                 ret = 0;
2804         else {
2805                 if ((mask & GFP_NOFS) == GFP_NOFS)
2806                         mask = GFP_NOFS;
2807                 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2808                                  1, 1, mask);
2809         }
2810         return ret;
2811 }
2812 EXPORT_SYMBOL(try_release_extent_state);
2813
2814 /*
2815  * a helper for releasepage.  As long as there are no locked extents
2816  * in the range corresponding to the page, both state records and extent
2817  * map records are removed
2818  */
2819 int try_release_extent_mapping(struct extent_map_tree *map,
2820                                struct extent_io_tree *tree, struct page *page,
2821                                gfp_t mask)
2822 {
2823         struct extent_map *em;
2824         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2825         u64 end = start + PAGE_CACHE_SIZE - 1;
2826
2827         if ((mask & __GFP_WAIT) &&
2828             page->mapping->host->i_size > 16 * 1024 * 1024) {
2829                 u64 len;
2830                 while (start <= end) {
2831                         len = end - start + 1;
2832                         spin_lock(&map->lock);
2833                         em = lookup_extent_mapping(map, start, len);
2834                         if (!em || IS_ERR(em)) {
2835                                 spin_unlock(&map->lock);
2836                                 break;
2837                         }
2838                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2839                             em->start != start) {
2840                                 spin_unlock(&map->lock);
2841                                 free_extent_map(em);
2842                                 break;
2843                         }
2844                         if (!test_range_bit(tree, em->start,
2845                                             extent_map_end(em) - 1,
2846                                             EXTENT_LOCKED | EXTENT_WRITEBACK |
2847                                             EXTENT_ORDERED,
2848                                             0)) {
2849                                 remove_extent_mapping(map, em);
2850                                 /* once for the rb tree */
2851                                 free_extent_map(em);
2852                         }
2853                         start = extent_map_end(em);
2854                         spin_unlock(&map->lock);
2855
2856                         /* once for us */
2857                         free_extent_map(em);
2858                 }
2859         }
2860         return try_release_extent_state(map, tree, page, mask);
2861 }
2862 EXPORT_SYMBOL(try_release_extent_mapping);
2863
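/*
 * bmap helper: map a logical file block to a sector on disk.  Holes,
 * inline extents and delalloc ranges have no single backing sector,
 * so they report 0, which is also what a failed lookup reports.
 */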
2864 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2865                 get_extent_t *get_extent)
2866 {
2867         struct inode *inode = mapping->host;
        /* widen before the shift: sector_t can be 32 bits wide */
        u64 start = (u64)iblock << inode->i_blkbits;
2869         sector_t sector = 0;
2870         size_t blksize = (1 << inode->i_blkbits);
2871         struct extent_map *em;
2872
2873         lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2874                     GFP_NOFS);
2875         em = get_extent(inode, NULL, 0, start, blksize, 0);
2876         unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2877                       GFP_NOFS);
2878         if (!em || IS_ERR(em))
2879                 return 0;
2880
2881         if (em->block_start > EXTENT_MAP_LAST_BYTE)
2882                 goto out;
2883
2884         sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2885 out:
2886         free_extent_map(em);
2887         return sector;
2888 }
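
/*
 * A minimal sketch of how a filesystem could hook this up as its
 * address_space .bmap operation, assuming a get_extent_t callback
 * like btrfs_get_extent; the wrapper below is illustrative, not a
 * promise about the code in inode.c:
 *
 *        static sector_t btrfs_bmap(struct address_space *mapping,
 *                                   sector_t iblock)
 *        {
 *                return extent_bmap(mapping, iblock, btrfs_get_extent);
 *        }
 */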
2889
2890 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2891                                               unsigned long i)
2892 {
2893         struct page *p;
2894         struct address_space *mapping;
2895
2896         if (i == 0)
2897                 return eb->first_page;
2898         i += eb->start >> PAGE_CACHE_SHIFT;
2899         mapping = eb->first_page->mapping;
2900         if (!mapping)
2901                 return NULL;
2902
2903         /*
2904          * extent_buffer_page is only called after pinning the page
2905          * by increasing the reference count.  So we know the page must
2906          * be in the radix tree.
2907          */
2908         rcu_read_lock();
2909         p = radix_tree_lookup(&mapping->page_tree, i);
2910         rcu_read_unlock();
2911
2912         return p;
2913 }
2914
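/*
 * how many pages does an extent buffer cover?  Partial pages at both
 * ends count: with 4k pages, start 4095 and len 2 touches pages 0
 * and 1, so the answer is 2.
 */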
2915 static inline unsigned long num_extent_pages(u64 start, u64 len)
2916 {
2917         return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2918                 (start >> PAGE_CACHE_SHIFT);
2919 }
2920
2921 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2922                                                    u64 start,
2923                                                    unsigned long len,
2924                                                    gfp_t mask)
2925 {
2926         struct extent_buffer *eb = NULL;
2927 #ifdef LEAK_DEBUG
2928         unsigned long flags;
2929 #endif
2930
        eb = kmem_cache_zalloc(extent_buffer_cache, mask);
        if (!eb)
                return NULL;
2932         eb->start = start;
2933         eb->len = len;
2934         mutex_init(&eb->mutex);
2935 #ifdef LEAK_DEBUG
2936         spin_lock_irqsave(&leak_lock, flags);
2937         list_add(&eb->leak_list, &buffers);
2938         spin_unlock_irqrestore(&leak_lock, flags);
2939 #endif
2940         atomic_set(&eb->refs, 1);
2941
2942         return eb;
2943 }
2944
2945 static void __free_extent_buffer(struct extent_buffer *eb)
2946 {
2947 #ifdef LEAK_DEBUG
2948         unsigned long flags;
2949         spin_lock_irqsave(&leak_lock, flags);
2950         list_del(&eb->leak_list);
2951         spin_unlock_irqrestore(&leak_lock, flags);
2952 #endif
2953         kmem_cache_free(extent_buffer_cache, eb);
2954 }
2955
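/*
 * find or create the extent buffer for [start, start + len).  The
 * backing pages come from the tree's mapping; a caller that already
 * holds the first page can pass it in as page0.  The buffer returned
 * carries one reference for the caller, and buffers newly inserted
 * into the tree hold an extra reference for the tree itself.
 */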
2956 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2957                                           u64 start, unsigned long len,
2958                                           struct page *page0,
2959                                           gfp_t mask)
2960 {
2961         unsigned long num_pages = num_extent_pages(start, len);
2962         unsigned long i;
2963         unsigned long index = start >> PAGE_CACHE_SHIFT;
2964         struct extent_buffer *eb;
2965         struct extent_buffer *exists = NULL;
2966         struct page *p;
2967         struct address_space *mapping = tree->mapping;
2968         int uptodate = 1;
2969
2970         spin_lock(&tree->buffer_lock);
2971         eb = buffer_search(tree, start);
2972         if (eb) {
2973                 atomic_inc(&eb->refs);
2974                 spin_unlock(&tree->buffer_lock);
2975                 mark_page_accessed(eb->first_page);
2976                 return eb;
2977         }
2978         spin_unlock(&tree->buffer_lock);
2979
2980         eb = __alloc_extent_buffer(tree, start, len, mask);
2981         if (!eb)
2982                 return NULL;
2983
2984         if (page0) {
2985                 eb->first_page = page0;
2986                 i = 1;
2987                 index++;
2988                 page_cache_get(page0);
2989                 mark_page_accessed(page0);
2990                 set_page_extent_mapped(page0);
2991                 set_page_extent_head(page0, len);
2992                 uptodate = PageUptodate(page0);
2993         } else {
2994                 i = 0;
2995         }
2996         for (; i < num_pages; i++, index++) {
2997                 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2998                 if (!p) {
2999                         WARN_ON(1);
3000                         goto free_eb;
3001                 }
3002                 set_page_extent_mapped(p);
3003                 mark_page_accessed(p);
3004                 if (i == 0) {
3005                         eb->first_page = p;
3006                         set_page_extent_head(p, len);
3007                 } else {
3008                         set_page_private(p, EXTENT_PAGE_PRIVATE);
3009                 }
3010                 if (!PageUptodate(p))
3011                         uptodate = 0;
3012                 unlock_page(p);
3013         }
3014         if (uptodate)
3015                 eb->flags |= EXTENT_UPTODATE;
3016         eb->flags |= EXTENT_BUFFER_FILLED;
3017
3018         spin_lock(&tree->buffer_lock);
3019         exists = buffer_tree_insert(tree, start, &eb->rb_node);
3020         if (exists) {
3021                 /* add one reference for the caller */
3022                 atomic_inc(&exists->refs);
3023                 spin_unlock(&tree->buffer_lock);
3024                 goto free_eb;
3025         }
3026         spin_unlock(&tree->buffer_lock);
3027
3028         /* add one reference for the tree */
3029         atomic_inc(&eb->refs);
3030         return eb;
3031
3032 free_eb:
3033         if (!atomic_dec_and_test(&eb->refs))
3034                 return exists;
        for (index = 1; index < i; index++)
                page_cache_release(extent_buffer_page(eb, index));
        /* if no page was ever attached, there is nothing to release */
        if (i > 0)
                page_cache_release(extent_buffer_page(eb, 0));
3038         __free_extent_buffer(eb);
3039         return exists;
3040 }
3041 EXPORT_SYMBOL(alloc_extent_buffer);
3042
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
                                         u64 start, unsigned long len,
                                         gfp_t mask)
{
3047         struct extent_buffer *eb;
3048
3049         spin_lock(&tree->buffer_lock);
3050         eb = buffer_search(tree, start);
3051         if (eb)
3052                 atomic_inc(&eb->refs);
3053         spin_unlock(&tree->buffer_lock);
3054
3055         if (eb)
3056                 mark_page_accessed(eb->first_page);
3057
3058         return eb;
3059 }
3060 EXPORT_SYMBOL(find_extent_buffer);
3061
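/*
 * drop the caller's reference.  The buffer itself is not torn down
 * here; that happens in try_release_extent_buffer once the tree holds
 * the last reference.
 */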
3062 void free_extent_buffer(struct extent_buffer *eb)
3063 {
3064         if (!eb)
3065                 return;
3066
        if (!atomic_dec_and_test(&eb->refs))
                return;

        /*
         * the last reference is supposed to be dropped by
         * try_release_extent_buffer, which also frees the pages;
         * reaching zero here means the accounting went wrong
         */
        WARN_ON(1);
3071 }
3072 EXPORT_SYMBOL(free_extent_buffer);
3073
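/*
 * clear the dirty bits for an extent buffer, both in the io tree and
 * on the backing pages.  Pages shared with a neighbouring buffer are
 * left alone if any part of them is still marked EXTENT_DIRTY.
 */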
3074 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3075                               struct extent_buffer *eb)
3076 {
3077         int set;
3078         unsigned long i;
3079         unsigned long num_pages;
3080         struct page *page;
3081
3082         u64 start = eb->start;
3083         u64 end = start + eb->len - 1;
3084
3085         set = clear_extent_dirty(tree, start, end, GFP_NOFS);
3086         num_pages = num_extent_pages(eb->start, eb->len);
3087
3088         for (i = 0; i < num_pages; i++) {
3089                 page = extent_buffer_page(eb, i);
3090                 lock_page(page);
3091                 if (i == 0)
3092                         set_page_extent_head(page, eb->len);
3093                 else
3094                         set_page_private(page, EXTENT_PAGE_PRIVATE);
3095
3096                 /*
3097                  * if we're on the last page or the first page and the
3098                  * block isn't aligned on a page boundary, do extra checks
3099                  * to make sure we don't clean page that is partially dirty
3100                  */
3101                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3102                     ((i == num_pages - 1) &&
3103                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3104                         start = (u64)page->index << PAGE_CACHE_SHIFT;
3105                         end  = start + PAGE_CACHE_SIZE - 1;
3106                         if (test_range_bit(tree, start, end,
3107                                            EXTENT_DIRTY, 0)) {
3108                                 unlock_page(page);
3109                                 continue;
3110                         }
3111                 }
3112                 clear_page_dirty_for_io(page);
3113                 spin_lock_irq(&page->mapping->tree_lock);
3114                 if (!PageDirty(page)) {
3115                         radix_tree_tag_clear(&page->mapping->page_tree,
3116                                                 page_index(page),
3117                                                 PAGECACHE_TAG_DIRTY);
3118                 }
3119                 spin_unlock_irq(&page->mapping->tree_lock);
3120                 unlock_page(page);
3121         }
3122         return 0;
3123 }
3124 EXPORT_SYMBOL(clear_extent_buffer_dirty);
3125
3126 int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
3127                                     struct extent_buffer *eb)
3128 {
3129         return wait_on_extent_writeback(tree, eb->start,
3130                                         eb->start + eb->len - 1);
3131 }
3132 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
3133
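/*
 * dirty every page backing the buffer and the matching byte range in
 * the io tree.
 */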
3134 int set_extent_buffer_dirty(struct extent_io_tree *tree,
3135                              struct extent_buffer *eb)
3136 {
3137         unsigned long i;
3138         unsigned long num_pages;
3139
3140         num_pages = num_extent_pages(eb->start, eb->len);
3141         for (i = 0; i < num_pages; i++) {
3142                 struct page *page = extent_buffer_page(eb, i);
3143                 /* writepage may need to do something special for the
3144                  * first page, we have to make sure page->private is
3145                  * properly set.  releasepage may drop page->private
3146                  * on us if the page isn't already dirty.
3147                  */
3148                 lock_page(page);
3149                 if (i == 0) {
3150                         set_page_extent_head(page, eb->len);
3151                 } else if (PagePrivate(page) &&
3152                            page->private != EXTENT_PAGE_PRIVATE) {
3153                         set_page_extent_mapped(page);
3154                 }
                __set_page_dirty_nobuffers(page);
                set_extent_dirty(tree, page_offset(page),
                                 page_offset(page) + PAGE_CACHE_SIZE - 1,
                                 GFP_NOFS);
3159                 unlock_page(page);
3160         }
3161         return 0;
3162 }
3163 EXPORT_SYMBOL(set_extent_buffer_dirty);
3164
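/*
 * the mirror image of set_extent_buffer_uptodate below: drop the
 * uptodate bits in the io tree and on every backing page.
 */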
3165 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3166                                 struct extent_buffer *eb)
3167 {
3168         unsigned long i;
3169         struct page *page;
3170         unsigned long num_pages;
3171
3172         num_pages = num_extent_pages(eb->start, eb->len);
3173         eb->flags &= ~EXTENT_UPTODATE;
3174
3175         clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3176                               GFP_NOFS);
3177         for (i = 0; i < num_pages; i++) {
3178                 page = extent_buffer_page(eb, i);
3179                 if (page)
3180                         ClearPageUptodate(page);
3181         }
3182         return 0;
3183 }
3184
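/*
 * mark the buffer uptodate.  Pages fully owned by this buffer are
 * flagged directly; partial pages at either end are only flagged once
 * check_page_uptodate sees the whole page covered in the io tree.
 */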
3185 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3186                                 struct extent_buffer *eb)
3187 {
3188         unsigned long i;
3189         struct page *page;
3190         unsigned long num_pages;
3191
3192         num_pages = num_extent_pages(eb->start, eb->len);
3193
3194         set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3195                             GFP_NOFS);
3196         for (i = 0; i < num_pages; i++) {
3197                 page = extent_buffer_page(eb, i);
3198                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3199                     ((i == num_pages - 1) &&
3200                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3201                         check_page_uptodate(tree, page);
3202                         continue;
3203                 }
3204                 SetPageUptodate(page);
3205         }
3206         return 0;
3207 }
3208 EXPORT_SYMBOL(set_extent_buffer_uptodate);
3209
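/*
 * returns 1 if the range is fully uptodate: either the EXTENT_UPTODATE
 * bit covers all of it, or every page in the range is individually
 * uptodate.
 */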
3210 int extent_range_uptodate(struct extent_io_tree *tree,
3211                           u64 start, u64 end)
3212 {
3213         struct page *page;
3214         int ret;
3215         int pg_uptodate = 1;
3216         int uptodate;
3217         unsigned long index;
3218
3219         ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
3220         if (ret)
3221                 return 1;
        while (start <= end) {
                index = start >> PAGE_CACHE_SHIFT;
                page = find_get_page(tree->mapping, index);
                /* a page missing from the cache can't be uptodate */
                if (!page) {
                        pg_uptodate = 0;
                        break;
                }
                uptodate = PageUptodate(page);
                page_cache_release(page);
3227                 if (!uptodate) {
3228                         pg_uptodate = 0;
3229                         break;
3230                 }
3231                 start += PAGE_CACHE_SIZE;
3232         }
3233         return pg_uptodate;
3234 }
3235
3236 int extent_buffer_uptodate(struct extent_io_tree *tree,
3237                            struct extent_buffer *eb)
3238 {
3239         int ret = 0;
3240         unsigned long num_pages;
3241         unsigned long i;
3242         struct page *page;
3243         int pg_uptodate = 1;
3244
3245         if (eb->flags & EXTENT_UPTODATE)
3246                 return 1;
3247
3248         ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3249                            EXTENT_UPTODATE, 1);
3250         if (ret)
3251                 return ret;
3252
3253         num_pages = num_extent_pages(eb->start, eb->len);
3254         for (i = 0; i < num_pages; i++) {
3255                 page = extent_buffer_page(eb, i);
3256                 if (!PageUptodate(page)) {
3257                         pg_uptodate = 0;
3258                         break;
3259                 }
3260         }
3261         return pg_uptodate;
3262 }
3263 EXPORT_SYMBOL(extent_buffer_uptodate);
3264
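/*
 * read the backing pages of an extent buffer in from disk.  With
 * wait == 0 reads are only started, and if any page can't be locked
 * without blocking the whole attempt is backed out.  With wait set,
 * the call blocks until the IO completes and returns -EIO if any page
 * failed to become uptodate.  mirror_num picks the copy to read, 0
 * meaning any available mirror.
 */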
3265 int read_extent_buffer_pages(struct extent_io_tree *tree,
3266                              struct extent_buffer *eb,
3267                              u64 start, int wait,
3268                              get_extent_t *get_extent, int mirror_num)
3269 {
3270         unsigned long i;
3271         unsigned long start_i;
3272         struct page *page;
3273         int err;
3274         int ret = 0;
3275         int locked_pages = 0;
3276         int all_uptodate = 1;
3277         int inc_all_pages = 0;
3278         unsigned long num_pages;
3279         struct bio *bio = NULL;
3280         unsigned long bio_flags = 0;
3281
3282         if (eb->flags & EXTENT_UPTODATE)
3283                 return 0;
3284
3285         if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3286                            EXTENT_UPTODATE, 1)) {
3287                 return 0;
3288         }
3289
3290         if (start) {
3291                 WARN_ON(start < eb->start);
3292                 start_i = (start >> PAGE_CACHE_SHIFT) -
3293                         (eb->start >> PAGE_CACHE_SHIFT);
3294         } else {
3295                 start_i = 0;
3296         }
3297
3298         num_pages = num_extent_pages(eb->start, eb->len);
3299         for (i = start_i; i < num_pages; i++) {
3300                 page = extent_buffer_page(eb, i);
3301                 if (!wait) {
3302                         if (!trylock_page(page))
3303                                 goto unlock_exit;
3304                 } else {
3305                         lock_page(page);
3306                 }
3307                 locked_pages++;
3308                 if (!PageUptodate(page)) {
3309                         all_uptodate = 0;
3310                 }
3311         }
        if (all_uptodate) {
                if (start_i == 0)
                        eb->flags |= EXTENT_UPTODATE;
                goto unlock_exit;
        }
3320
3321         for (i = start_i; i < num_pages; i++) {
3322                 page = extent_buffer_page(eb, i);
3323                 if (inc_all_pages)
3324                         page_cache_get(page);
3325                 if (!PageUptodate(page)) {
3326                         if (start_i == 0)
3327                                 inc_all_pages = 1;
3328                         ClearPageError(page);
3329                         err = __extent_read_full_page(tree, page,
3330                                                       get_extent, &bio,
3331                                                       mirror_num, &bio_flags);
                        if (err) {
                                ret = err;
                                printk(KERN_ERR "err %d from "
                                       "__extent_read_full_page\n", ret);
                        }
3336                 } else {
3337                         unlock_page(page);
3338                 }
3339         }
3340
3341         if (bio)
3342                 submit_one_bio(READ, bio, mirror_num, bio_flags);
3343
        if (ret || !wait) {
                if (ret)
                        printk(KERN_ERR "ret %d wait %d returning\n",
                               ret, wait);
                return ret;
        }
3349         for (i = start_i; i < num_pages; i++) {
3350                 page = extent_buffer_page(eb, i);
3351                 wait_on_page_locked(page);
3352                 if (!PageUptodate(page)) {
                        printk(KERN_ERR "page not uptodate after "
                               "wait_on_page_locked\n");
3354                         ret = -EIO;
3355                 }
3356         }
3357         if (!ret)
3358                 eb->flags |= EXTENT_UPTODATE;
3359         return ret;
3360
3361 unlock_exit:
3362         i = start_i;
        while (locked_pages > 0) {
3364                 page = extent_buffer_page(eb, i);
3365                 i++;
3366                 unlock_page(page);
3367                 locked_pages--;
3368         }
3369         return ret;
3370 }
3371 EXPORT_SYMBOL(read_extent_buffer_pages);
3372
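/*
 * copy len bytes starting at offset start out of the extent buffer
 * and into dstv, walking page by page.  KM_USER1 is used for the
 * atomic kmaps, presumably so this can run while a longer lived
 * KM_USER0 mapping from map_extent_buffer is still in place.
 *
 * Typical use copies fixed size structures out of a tree block,
 * e.g. (illustrative):
 *
 *        struct btrfs_header header;
 *        read_extent_buffer(eb, &header, 0, sizeof(header));
 */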
3373 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3374                         unsigned long start,
3375                         unsigned long len)
3376 {
3377         size_t cur;
3378         size_t offset;
3379         struct page *page;
3380         char *kaddr;
3381         char *dst = (char *)dstv;
3382         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3383         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3384
3385         WARN_ON(start > eb->len);
3386         WARN_ON(start + len > eb->start + eb->len);
3387
3388         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3389
        while (len > 0) {
3391                 page = extent_buffer_page(eb, i);
3392
3393                 cur = min(len, (PAGE_CACHE_SIZE - offset));
3394                 kaddr = kmap_atomic(page, KM_USER1);
3395                 memcpy(dst, kaddr + offset, cur);
3396                 kunmap_atomic(kaddr, KM_USER1);
3397
3398                 dst += cur;
3399                 len -= cur;
3400                 offset = 0;
3401                 i++;
3402         }
3403 }
3404 EXPORT_SYMBOL(read_extent_buffer);
3405
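/*
 * kmap_atomic a chunk of the buffer.  The requested range
 * [start, start + min_len) must not cross a page boundary, otherwise
 * -EINVAL comes back.  On success *token is what unmap_extent_buffer
 * wants, *map points at the byte asked for, and *map_start/*map_len
 * describe the whole stretch of the buffer this mapping makes
 * addressable.
 */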
3406 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3407                                unsigned long min_len, char **token, char **map,
3408                                unsigned long *map_start,
3409                                unsigned long *map_len, int km)
3410 {
3411         size_t offset = start & (PAGE_CACHE_SIZE - 1);
3412         char *kaddr;
3413         struct page *p;
3414         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3415         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3416         unsigned long end_i = (start_offset + start + min_len - 1) >>
3417                 PAGE_CACHE_SHIFT;
3418
3419         if (i != end_i)
3420                 return -EINVAL;
3421
3422         if (i == 0) {
3423                 offset = start_offset;
3424                 *map_start = 0;
3425         } else {
3426                 offset = 0;
3427                 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3428         }
        if (start + min_len > eb->len) {
                printk(KERN_ERR "bad mapping eb start %Lu len %lu, "
                       "wanted %lu %lu\n", eb->start, eb->len,
                       start, min_len);
                WARN_ON(1);
        }
3433
3434         p = extent_buffer_page(eb, i);
3435         kaddr = kmap_atomic(p, km);
3436         *token = kaddr;
3437         *map = kaddr + offset;
3438         *map_len = PAGE_CACHE_SIZE - offset;
3439         return 0;
3440 }
3441 EXPORT_SYMBOL(map_private_extent_buffer);
3442
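/*
 * like map_private_extent_buffer, but cooperates with the cached
 * per-buffer mapping: an existing eb->map_token is unmapped first
 * and, if one was there, the new mapping is cached in its place.
 */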
3443 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3444                       unsigned long min_len,
3445                       char **token, char **map,
3446                       unsigned long *map_start,
3447                       unsigned long *map_len, int km)
3448 {
3449         int err;
3450         int save = 0;
3451         if (eb->map_token) {
3452                 unmap_extent_buffer(eb, eb->map_token, km);
3453                 eb->map_token = NULL;
3454                 save = 1;
3455         }
3456         err = map_private_extent_buffer(eb, start, min_len, token, map,
3457                                        map_start, map_len, km);
3458         if (!err && save) {
3459                 eb->map_token = *token;
3460                 eb->kaddr = *map;
3461                 eb->map_start = *map_start;
3462                 eb->map_len = *map_len;
3463         }
3464         return err;
3465 }
3466 EXPORT_SYMBOL(map_extent_buffer);
3467
3468 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3469 {
3470         kunmap_atomic(token, km);
3471 }
3472 EXPORT_SYMBOL(unmap_extent_buffer);
3473
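/*
 * memcmp between a caller supplied buffer and a range of an extent
 * buffer, with the usual memcmp return convention.
 */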
3474 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3475                           unsigned long start,
3476                           unsigned long len)
3477 {
3478         size_t cur;
3479         size_t offset;
3480         struct page *page;
3481         char *kaddr;
3482         char *ptr = (char *)ptrv;
3483         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3484         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3485         int ret = 0;
3486
3487         WARN_ON(start > eb->len);
3488         WARN_ON(start + len > eb->start + eb->len);
3489
3490         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3491
        while (len > 0) {
3493                 page = extent_buffer_page(eb, i);
3494
3495                 cur = min(len, (PAGE_CACHE_SIZE - offset));
3496
3497                 kaddr = kmap_atomic(page, KM_USER0);
3498                 ret = memcmp(ptr, kaddr + offset, cur);
3499                 kunmap_atomic(kaddr, KM_USER0);
3500                 if (ret)
3501                         break;
3502
3503                 ptr += cur;
3504                 len -= cur;
3505                 offset = 0;
3506                 i++;
3507         }
3508         return ret;
3509 }
3510 EXPORT_SYMBOL(memcmp_extent_buffer);
3511
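/*
 * copy len bytes from srcv into the extent buffer at offset start.
 * The backing pages are expected to be uptodate already, hence the
 * WARN_ON.
 */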
3512 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3513                          unsigned long start, unsigned long len)
3514 {
3515         size_t cur;
3516         size_t offset;
3517         struct page *page;
3518         char *kaddr;
3519         char *src = (char *)srcv;
3520         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3521         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3522
3523         WARN_ON(start > eb->len);
3524         WARN_ON(start + len > eb->start + eb->len);
3525
3526         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3527
        while (len > 0) {
3529                 page = extent_buffer_page(eb, i);
3530                 WARN_ON(!PageUptodate(page));
3531
3532                 cur = min(len, PAGE_CACHE_SIZE - offset);
3533                 kaddr = kmap_atomic(page, KM_USER1);
3534                 memcpy(kaddr + offset, src, cur);
3535                 kunmap_atomic(kaddr, KM_USER1);
3536
3537                 src += cur;
3538                 len -= cur;
3539                 offset = 0;
3540                 i++;
3541         }
3542 }
3543 EXPORT_SYMBOL(write_extent_buffer);
3544
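/*
 * memset for a range inside an extent buffer, page by page.
 */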
3545 void memset_extent_buffer(struct extent_buffer *eb, char c,
3546                           unsigned long start, unsigned long len)
3547 {
3548         size_t cur;
3549         size_t offset;
3550         struct page *page;
3551         char *kaddr;
3552         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3553         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3554
3555         WARN_ON(start > eb->len);
3556         WARN_ON(start + len > eb->start + eb->len);
3557
3558         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3559
        while (len > 0) {
3561                 page = extent_buffer_page(eb, i);
3562                 WARN_ON(!PageUptodate(page));
3563
3564                 cur = min(len, PAGE_CACHE_SIZE - offset);
3565                 kaddr = kmap_atomic(page, KM_USER0);
3566                 memset(kaddr + offset, c, cur);
3567                 kunmap_atomic(kaddr, KM_USER0);
3568
3569                 len -= cur;
3570                 offset = 0;
3571                 i++;
3572         }
3573 }
3574 EXPORT_SYMBOL(memset_extent_buffer);
3575
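/*
 * copy len bytes from one extent buffer into another, using
 * read_extent_buffer to cross the source's page boundaries.
 */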
3576 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3577                         unsigned long dst_offset, unsigned long src_offset,
3578                         unsigned long len)
3579 {
3580         u64 dst_len = dst->len;
3581         size_t cur;
3582         size_t offset;
3583         struct page *page;
3584         char *kaddr;
3585         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3586         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3587
3588         WARN_ON(src->len != dst_len);
3589
3590         offset = (start_offset + dst_offset) &
3591                 ((unsigned long)PAGE_CACHE_SIZE - 1);
3592
        while (len > 0) {
3594                 page = extent_buffer_page(dst, i);
3595                 WARN_ON(!PageUptodate(page));
3596
3597                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3598
3599                 kaddr = kmap_atomic(page, KM_USER0);
3600                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3601                 kunmap_atomic(kaddr, KM_USER0);
3602
3603                 src_offset += cur;
3604                 len -= cur;
3605                 offset = 0;
3606                 i++;
3607         }
3608 }
3609 EXPORT_SYMBOL(copy_extent_buffer);
3610
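/*
 * memmove between two (possibly identical) pages.  For distinct pages
 * the copy runs backwards by hand, because this is only used by
 * memmove_extent_buffer for descending copies.
 */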
3611 static void move_pages(struct page *dst_page, struct page *src_page,
3612                        unsigned long dst_off, unsigned long src_off,
3613                        unsigned long len)
3614 {
3615         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3616         if (dst_page == src_page) {
3617                 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3618         } else {
3619                 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3620                 char *p = dst_kaddr + dst_off + len;
3621                 char *s = src_kaddr + src_off + len;
3622
3623                 while (len--)
3624                         *--p = *--s;
3625
3626                 kunmap_atomic(src_kaddr, KM_USER1);
3627         }
3628         kunmap_atomic(dst_kaddr, KM_USER0);
3629 }
3630
3631 static void copy_pages(struct page *dst_page, struct page *src_page,
3632                        unsigned long dst_off, unsigned long src_off,
3633                        unsigned long len)
3634 {
3635         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3636         char *src_kaddr;
3637
3638         if (dst_page != src_page)
3639                 src_kaddr = kmap_atomic(src_page, KM_USER1);
3640         else
3641                 src_kaddr = dst_kaddr;
3642
3643         memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3644         kunmap_atomic(dst_kaddr, KM_USER0);
3645         if (dst_page != src_page)
3646                 kunmap_atomic(src_kaddr, KM_USER1);
3647 }
3648
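/*
 * memcpy between two ranges of the same extent buffer.  Ranges that
 * overlap in a way that needs a backwards copy must go through
 * memmove_extent_buffer instead.
 */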
3649 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3650                            unsigned long src_offset, unsigned long len)
3651 {
3652         size_t cur;
3653         size_t dst_off_in_page;
3654         size_t src_off_in_page;
3655         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3656         unsigned long dst_i;
3657         unsigned long src_i;
3658
        if (src_offset + len > dst->len) {
                printk(KERN_ERR "memcpy bogus src_offset %lu move len %lu "
                       "dst len %lu\n", src_offset, len, dst->len);
                BUG_ON(1);
        }
        if (dst_offset + len > dst->len) {
                printk(KERN_ERR "memcpy bogus dst_offset %lu move len %lu "
                       "dst len %lu\n", dst_offset, len, dst->len);
                BUG_ON(1);
        }
3669
        while (len > 0) {
3671                 dst_off_in_page = (start_offset + dst_offset) &
3672                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3673                 src_off_in_page = (start_offset + src_offset) &
3674                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3675
3676                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3677                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3678
3679                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3680                                                src_off_in_page));
3681                 cur = min_t(unsigned long, cur,
3682                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3683
3684                 copy_pages(extent_buffer_page(dst, dst_i),
3685                            extent_buffer_page(dst, src_i),
3686                            dst_off_in_page, src_off_in_page, cur);
3687
3688                 src_offset += cur;
3689                 dst_offset += cur;
3690                 len -= cur;
3691         }
3692 }
3693 EXPORT_SYMBOL(memcpy_extent_buffer);
3694
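/*
 * memmove within one extent buffer.  When dst_offset < src_offset a
 * forward copy is safe even for overlapping ranges, so this delegates
 * to memcpy_extent_buffer; otherwise it copies backwards from the end
 * so that overlaps are handled correctly.
 */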
3695 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3696                            unsigned long src_offset, unsigned long len)
3697 {
3698         size_t cur;
3699         size_t dst_off_in_page;
3700         size_t src_off_in_page;
3701         unsigned long dst_end = dst_offset + len - 1;
3702         unsigned long src_end = src_offset + len - 1;
3703         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3704         unsigned long dst_i;
3705         unsigned long src_i;
3706
        if (src_offset + len > dst->len) {
                printk(KERN_ERR "memmove bogus src_offset %lu move len %lu "
                       "dst len %lu\n", src_offset, len, dst->len);
                BUG_ON(1);
        }
        if (dst_offset + len > dst->len) {
                printk(KERN_ERR "memmove bogus dst_offset %lu move len %lu "
                       "dst len %lu\n", dst_offset, len, dst->len);
                BUG_ON(1);
        }
3717         if (dst_offset < src_offset) {
3718                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3719                 return;
3720         }
        while (len > 0) {
3722                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3723                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3724
3725                 dst_off_in_page = (start_offset + dst_end) &
3726                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3727                 src_off_in_page = (start_offset + src_end) &
3728                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3729
3730                 cur = min_t(unsigned long, len, src_off_in_page + 1);
3731                 cur = min(cur, dst_off_in_page + 1);
3732                 move_pages(extent_buffer_page(dst, dst_i),
3733                            extent_buffer_page(dst, src_i),
3734                            dst_off_in_page - cur + 1,
3735                            src_off_in_page - cur + 1, cur);
3736
3737                 dst_end -= cur;
3738                 src_end -= cur;
3739                 len -= cur;
3740         }
3741 }
3742 EXPORT_SYMBOL(memmove_extent_buffer);
3743
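/*
 * releasepage helper for pages backing an extent buffer.  The buffer
 * is only torn down when the tree holds the last reference; otherwise
 * the page must stay, and 0 is returned.
 */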
3744 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3745 {
3746         u64 start = page_offset(page);
3747         struct extent_buffer *eb;
3748         int ret = 1;
3749         unsigned long i;
3750         unsigned long num_pages;
3751
3752         spin_lock(&tree->buffer_lock);
3753         eb = buffer_search(tree, start);
3754         if (!eb)
3755                 goto out;
3756
3757         if (atomic_read(&eb->refs) > 1) {
3758                 ret = 0;
3759                 goto out;
3760         }
3761         /* at this point we can safely release the extent buffer */
3762         num_pages = num_extent_pages(eb->start, eb->len);
3763         for (i = 0; i < num_pages; i++)
3764                 page_cache_release(extent_buffer_page(eb, i));
3765         rb_erase(&eb->rb_node, &tree->buffer);
3766         __free_extent_buffer(eb);
3767 out:
3768         spin_unlock(&tree->buffer_lock);
3769         return ret;
3770 }
3771 EXPORT_SYMBOL(try_release_extent_buffer);