fs/btrfs/extent_map.c
Btrfs: Add readpages support
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/gfp.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_map.h"
16
17 /* temporary define until extent_map moves out of btrfs */
18 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
19                                        unsigned long extra_flags,
20                                        void (*ctor)(void *, struct kmem_cache *,
21                                                     unsigned long));
22
23 static struct kmem_cache *extent_map_cache;
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
26
27 static LIST_HEAD(buffers);
28 static LIST_HEAD(states);
29
30 static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
31 #define BUFFER_LRU_MAX 64
32
33 struct tree_entry {
34         u64 start;
35         u64 end;
36         int in_tree;
37         struct rb_node rb_node;
38 };
39
40 struct extent_page_data {
41         struct bio *bio;
42         struct extent_map_tree *tree;
43         get_extent_t *get_extent;
44 };
45
46 void __init extent_map_init(void)
47 {
48         extent_map_cache = btrfs_cache_create("extent_map",
49                                             sizeof(struct extent_map), 0,
50                                             NULL);
51         extent_state_cache = btrfs_cache_create("extent_state",
52                                             sizeof(struct extent_state), 0,
53                                             NULL);
54         extent_buffer_cache = btrfs_cache_create("extent_buffers",
55                                             sizeof(struct extent_buffer), 0,
56                                             NULL);
57 }
58
59 void __exit extent_map_exit(void)
60 {
61         struct extent_state *state;
62
63         while (!list_empty(&states)) {
64                 state = list_entry(states.next, struct extent_state, list);
65                 printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
66                 list_del(&state->list);
67                 kmem_cache_free(extent_state_cache, state);
68
69         }
70
71         if (extent_map_cache)
72                 kmem_cache_destroy(extent_map_cache);
73         if (extent_state_cache)
74                 kmem_cache_destroy(extent_state_cache);
75         if (extent_buffer_cache)
76                 kmem_cache_destroy(extent_buffer_cache);
77 }
78
79 void extent_map_tree_init(struct extent_map_tree *tree,
80                           struct address_space *mapping, gfp_t mask)
81 {
82         tree->map.rb_node = NULL;
83         tree->state.rb_node = NULL;
84         tree->ops = NULL;
85         rwlock_init(&tree->lock);
86         spin_lock_init(&tree->lru_lock);
87         tree->mapping = mapping;
88         INIT_LIST_HEAD(&tree->buffer_lru);
89         tree->lru_size = 0;
90 }
91 EXPORT_SYMBOL(extent_map_tree_init);
92
93 void extent_map_tree_empty_lru(struct extent_map_tree *tree)
94 {
95         struct extent_buffer *eb;
96         while(!list_empty(&tree->buffer_lru)) {
97                 eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
98                                 lru);
99                 list_del(&eb->lru);
100                 free_extent_buffer(eb);
101         }
102 }
103 EXPORT_SYMBOL(extent_map_tree_empty_lru);
104
105 struct extent_map *alloc_extent_map(gfp_t mask)
106 {
107         struct extent_map *em;
108         em = kmem_cache_alloc(extent_map_cache, mask);
109         if (!em || IS_ERR(em))
110                 return em;
111         em->in_tree = 0;
112         atomic_set(&em->refs, 1);
113         return em;
114 }
115 EXPORT_SYMBOL(alloc_extent_map);
116
117 void free_extent_map(struct extent_map *em)
118 {
119         if (!em)
120                 return;
121         if (atomic_dec_and_test(&em->refs)) {
122                 WARN_ON(em->in_tree);
123                 kmem_cache_free(extent_map_cache, em);
124         }
125 }
126 EXPORT_SYMBOL(free_extent_map);
127
128
129 struct extent_state *alloc_extent_state(gfp_t mask)
130 {
131         struct extent_state *state;
132         unsigned long flags;
133
134         state = kmem_cache_alloc(extent_state_cache, mask);
135         if (!state || IS_ERR(state))
136                 return state;
137         state->state = 0;
138         state->in_tree = 0;
139         state->private = 0;
140
141         spin_lock_irqsave(&state_lock, flags);
142         list_add(&state->list, &states);
143         spin_unlock_irqrestore(&state_lock, flags);
144
145         atomic_set(&state->refs, 1);
146         init_waitqueue_head(&state->wq);
147         return state;
148 }
149 EXPORT_SYMBOL(alloc_extent_state);
150
151 void free_extent_state(struct extent_state *state)
152 {
153         unsigned long flags;
154         if (!state)
155                 return;
156         if (atomic_dec_and_test(&state->refs)) {
157                 WARN_ON(state->in_tree);
158                 spin_lock_irqsave(&state_lock, flags);
159                 list_del(&state->list);
160                 spin_unlock_irqrestore(&state_lock, flags);
161                 kmem_cache_free(extent_state_cache, state);
162         }
163 }
164 EXPORT_SYMBOL(free_extent_state);
165
166 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
167                                    struct rb_node *node)
168 {
169         struct rb_node ** p = &root->rb_node;
170         struct rb_node * parent = NULL;
171         struct tree_entry *entry;
172
173         while(*p) {
174                 parent = *p;
175                 entry = rb_entry(parent, struct tree_entry, rb_node);
176
177                 if (offset < entry->start)
178                         p = &(*p)->rb_left;
179                 else if (offset > entry->end)
180                         p = &(*p)->rb_right;
181                 else
182                         return parent;
183         }
184
185         entry = rb_entry(node, struct tree_entry, rb_node);
186         entry->in_tree = 1;
187         rb_link_node(node, parent, p);
188         rb_insert_color(node, root);
189         return NULL;
190 }
191
192 static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
193                                    struct rb_node **prev_ret)
194 {
195         struct rb_node * n = root->rb_node;
196         struct rb_node *prev = NULL;
197         struct tree_entry *entry;
198         struct tree_entry *prev_entry = NULL;
199
200         while(n) {
201                 entry = rb_entry(n, struct tree_entry, rb_node);
202                 prev = n;
203                 prev_entry = entry;
204
205                 if (offset < entry->start)
206                         n = n->rb_left;
207                 else if (offset > entry->end)
208                         n = n->rb_right;
209                 else
210                         return n;
211         }
212         if (!prev_ret)
213                 return NULL;
214         while(prev && offset > prev_entry->end) {
215                 prev = rb_next(prev);
216                 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
217         }
218         *prev_ret = prev;
219         return NULL;
220 }
221
222 static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
223 {
224         struct rb_node *prev;
225         struct rb_node *ret;
226         ret = __tree_search(root, offset, &prev);
227         if (!ret)
228                 return prev;
229         return ret;
230 }
231
232 static int tree_delete(struct rb_root *root, u64 offset)
233 {
234         struct rb_node *node;
235         struct tree_entry *entry;
236
237         node = __tree_search(root, offset, NULL);
238         if (!node)
239                 return -ENOENT;
240         entry = rb_entry(node, struct tree_entry, rb_node);
241         entry->in_tree = 0;
242         rb_erase(node, root);
243         return 0;
244 }
245
246 /*
247  * add_extent_mapping tries a simple backward merge with existing
248  * mappings.  The extent_map struct passed in will be inserted into
249  * the tree directly (no copies made, just a reference taken).
250  */
251 int add_extent_mapping(struct extent_map_tree *tree,
252                        struct extent_map *em)
253 {
254         int ret = 0;
255         struct extent_map *prev = NULL;
256         struct rb_node *rb;
257
258         write_lock_irq(&tree->lock);
259         rb = tree_insert(&tree->map, em->end, &em->rb_node);
260         if (rb) {
261                 prev = rb_entry(rb, struct extent_map, rb_node);
262                 printk("found extent map %Lu %Lu on insert of %Lu %Lu\n", prev->start, prev->end, em->start, em->end);
263                 ret = -EEXIST;
264                 goto out;
265         }
266         atomic_inc(&em->refs);
267         if (em->start != 0) {
268                 rb = rb_prev(&em->rb_node);
269                 if (rb)
270                         prev = rb_entry(rb, struct extent_map, rb_node);
271                 if (prev && prev->end + 1 == em->start &&
272                     ((em->block_start == EXTENT_MAP_HOLE &&
273                       prev->block_start == EXTENT_MAP_HOLE) ||
274                      (em->block_start == EXTENT_MAP_INLINE &&
275                       prev->block_start == EXTENT_MAP_INLINE) ||
276                      (em->block_start == EXTENT_MAP_DELALLOC &&
277                       prev->block_start == EXTENT_MAP_DELALLOC) ||
278                      (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
279                       em->block_start == prev->block_end + 1))) {
280                         em->start = prev->start;
281                         em->block_start = prev->block_start;
282                         rb_erase(&prev->rb_node, &tree->map);
283                         prev->in_tree = 0;
284                         free_extent_map(prev);
285                 }
286          }
287 out:
288         write_unlock_irq(&tree->lock);
289         return ret;
290 }
291 EXPORT_SYMBOL(add_extent_mapping);
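
/*
 * Editorial sketch, not part of the original file: a minimal caller of
 * add_extent_mapping().  It only touches extent_map fields this file
 * already uses (start, end, block_start, block_end, bdev); the helper
 * name and the choice of a hole mapping are hypothetical.
 */
static int example_add_hole_mapping(struct extent_map_tree *tree,
                                    u64 start, u64 end)
{
        struct extent_map *em;
        int ret;

        em = alloc_extent_map(GFP_NOFS);
        if (!em)
                return -ENOMEM;

        em->start = start;
        em->end = end;
        em->block_start = EXTENT_MAP_HOLE;
        em->block_end = EXTENT_MAP_HOLE;
        em->bdev = NULL;    /* nothing backs a hole in this sketch */

        /* on success the tree takes its own reference on 'em' */
        ret = add_extent_mapping(tree, em);

        /* drop our local reference whether or not the insert worked */
        free_extent_map(em);
        return ret;
}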
292
293 /*
294  * lookup_extent_mapping returns the first extent_map struct in the
295  * tree that intersects the [start, end] (inclusive) range.  There may
296  * be additional objects in the tree that intersect, so check the object
297  * returned carefully to make sure you don't need additional lookups.
298  */
299 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
300                                          u64 start, u64 end)
301 {
302         struct extent_map *em;
303         struct rb_node *rb_node;
304
305         read_lock_irq(&tree->lock);
306         rb_node = tree_search(&tree->map, start);
307         if (!rb_node) {
308                 em = NULL;
309                 goto out;
310         }
311         if (IS_ERR(rb_node)) {
312                 em = ERR_PTR(PTR_ERR(rb_node));
313                 goto out;
314         }
315         em = rb_entry(rb_node, struct extent_map, rb_node);
316         if (em->end < start || em->start > end) {
317                 em = NULL;
318                 goto out;
319         }
320         atomic_inc(&em->refs);
321 out:
322         read_unlock_irq(&tree->lock);
323         return em;
324 }
325 EXPORT_SYMBOL(lookup_extent_mapping);
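
/*
 * Editorial sketch, not part of the original file: a caller of
 * lookup_extent_mapping().  As the comment above warns, the returned
 * mapping is only guaranteed to intersect [start, end], so the caller
 * clamps to the overlap and drops the reference it was handed.  The
 * helper name is hypothetical.
 */
static u64 example_mapped_bytes_at(struct extent_map_tree *tree,
                                   u64 start, u64 end)
{
        struct extent_map *em;
        u64 first;
        u64 last;

        em = lookup_extent_mapping(tree, start, end);
        if (!em || IS_ERR(em))
                return 0;

        /* clamp the mapping to the range we actually asked about */
        first = max(start, em->start);
        last = min(end, em->end);

        /* lookup_extent_mapping took a reference for us; drop it */
        free_extent_map(em);
        return last - first + 1;
}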
326
327 /*
328  * removes an extent_map struct from the tree.  No reference counts are
329  * dropped, and no checks are done to see if the range is in use
330  */
331 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
332 {
333         int ret;
334
335         write_lock_irq(&tree->lock);
336         ret = tree_delete(&tree->map, em->end);
337         write_unlock_irq(&tree->lock);
338         return ret;
339 }
340 EXPORT_SYMBOL(remove_extent_mapping);
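
/*
 * Editorial sketch, not part of the original file: since
 * remove_extent_mapping() drops no reference counts, a caller that wants
 * the mapping fully released has to drop the tree's reference itself
 * after a successful removal.  The helper name is hypothetical.
 */
static void example_drop_mapping(struct extent_map_tree *tree,
                                 struct extent_map *em)
{
        if (remove_extent_mapping(tree, em) == 0) {
                /* release the reference the tree was holding */
                free_extent_map(em);
        }
}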
341
342 /*
343  * utility function to look for merge candidates inside a given range.
344  * Any extents with matching state are merged together into a single
345  * extent in the tree.  Extents with EXTENT_IOBITS set in their state field
346  * are not merged because the end_io handlers need to be able to do
347  * operations on them without sleeping (or doing allocations/splits).
348  *
349  * This should be called with the tree lock held.
350  */
351 static int merge_state(struct extent_map_tree *tree,
352                        struct extent_state *state)
353 {
354         struct extent_state *other;
355         struct rb_node *other_node;
356
357         if (state->state & EXTENT_IOBITS)
358                 return 0;
359
360         other_node = rb_prev(&state->rb_node);
361         if (other_node) {
362                 other = rb_entry(other_node, struct extent_state, rb_node);
363                 if (other->end == state->start - 1 &&
364                     other->state == state->state) {
365                         state->start = other->start;
366                         other->in_tree = 0;
367                         rb_erase(&other->rb_node, &tree->state);
368                         free_extent_state(other);
369                 }
370         }
371         other_node = rb_next(&state->rb_node);
372         if (other_node) {
373                 other = rb_entry(other_node, struct extent_state, rb_node);
374                 if (other->start == state->end + 1 &&
375                     other->state == state->state) {
376                         other->start = state->start;
377                         state->in_tree = 0;
378                         rb_erase(&state->rb_node, &tree->state);
379                         free_extent_state(state);
380                 }
381         }
382         return 0;
383 }
384
385 /*
386  * insert an extent_state struct into the tree.  'bits' are set on the
387  * struct before it is inserted.
388  *
389  * This may return -EEXIST if the extent is already there, in which case the
390  * state struct is freed.
391  *
392  * The tree lock is not taken internally.  This is a utility function and
393  * probably isn't what you want to call (see set/clear_extent_bit).
394  */
395 static int insert_state(struct extent_map_tree *tree,
396                         struct extent_state *state, u64 start, u64 end,
397                         int bits)
398 {
399         struct rb_node *node;
400
401         if (end < start) {
402                 printk("end < start %Lu %Lu\n", end, start);
403                 WARN_ON(1);
404         }
405         state->state |= bits;
406         state->start = start;
407         state->end = end;
408         node = tree_insert(&tree->state, end, &state->rb_node);
409         if (node) {
410                 struct extent_state *found;
411                 found = rb_entry(node, struct extent_state, rb_node);
412                 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
413                 free_extent_state(state);
414                 return -EEXIST;
415         }
416         merge_state(tree, state);
417         return 0;
418 }
419
420 /*
421  * split a given extent state struct in two, inserting the preallocated
422  * struct 'prealloc' as the newly created first half.  'split' indicates an
423  * offset inside 'orig' where it should be split.
424  *
425  * Before calling,
426  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
427  * are two extent state structs in the tree:
428  * prealloc: [orig->start, split - 1]
429  * orig: [ split, orig->end ]
430  *
431  * The tree locks are not taken by this function. They need to be held
432  * by the caller.
433  */
434 static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
435                        struct extent_state *prealloc, u64 split)
436 {
437         struct rb_node *node;
438         prealloc->start = orig->start;
439         prealloc->end = split - 1;
440         prealloc->state = orig->state;
441         orig->start = split;
442
443         node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
444         if (node) {
445                 struct extent_state *found;
446                 found = rb_entry(node, struct extent_state, rb_node);
447                 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
448                 free_extent_state(prealloc);
449                 return -EEXIST;
450         }
451         return 0;
452 }
453
454 /*
455  * utility function to clear some bits in an extent state struct.
456  * it will optionally wake up anyone waiting on this state (wake == 1), or
457  * forcibly remove the state from the tree (delete == 1).
458  *
459  * If no bits are set on the state struct after clearing things, the
460  * struct is freed and removed from the tree
461  */
462 static int clear_state_bit(struct extent_map_tree *tree,
463                             struct extent_state *state, int bits, int wake,
464                             int delete)
465 {
466         int ret = state->state & bits;
467         state->state &= ~bits;
468         if (wake)
469                 wake_up(&state->wq);
470         if (delete || state->state == 0) {
471                 if (state->in_tree) {
472                         rb_erase(&state->rb_node, &tree->state);
473                         state->in_tree = 0;
474                         free_extent_state(state);
475                 } else {
476                         WARN_ON(1);
477                 }
478         } else {
479                 merge_state(tree, state);
480         }
481         return ret;
482 }
483
484 /*
485  * clear some bits on a range in the tree.  This may require splitting
486  * or inserting elements in the tree, so the gfp mask is used to
487  * indicate which allocations or sleeping are allowed.
488  *
489  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
490  * the given range from the tree regardless of state (ie for truncate).
491  *
492  * the range [start, end] is inclusive.
493  *
494  * This takes the tree lock, and returns < 0 on error, > 0 if any of the
495  * bits were already set, or zero if none of the bits were already set.
496  */
497 int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
498                      int bits, int wake, int delete, gfp_t mask)
499 {
500         struct extent_state *state;
501         struct extent_state *prealloc = NULL;
502         struct rb_node *node;
503         unsigned long flags;
504         int err;
505         int set = 0;
506
507 again:
508         if (!prealloc && (mask & __GFP_WAIT)) {
509                 prealloc = alloc_extent_state(mask);
510                 if (!prealloc)
511                         return -ENOMEM;
512         }
513
514         write_lock_irqsave(&tree->lock, flags);
515         /*
516          * this search will find the extents that end after
517          * our range starts
518          */
519         node = tree_search(&tree->state, start);
520         if (!node)
521                 goto out;
522         state = rb_entry(node, struct extent_state, rb_node);
523         if (state->start > end)
524                 goto out;
525         WARN_ON(state->end < start);
526
527         /*
528          *     | ---- desired range ---- |
529          *  | state | or
530          *  | ------------- state -------------- |
531          *
532          * We need to split the extent we found, and may flip
533          * bits on second half.
534          *
535          * If the extent we found extends past our range, we
536          * just split and search again.  It'll get split again
537          * the next time though.
538          *
539          * If the extent we found is inside our range, we clear
540          * the desired bit on it.
541          */
542
543         if (state->start < start) {
544                 err = split_state(tree, state, prealloc, start);
545                 BUG_ON(err == -EEXIST);
546                 prealloc = NULL;
547                 if (err)
548                         goto out;
549                 if (state->end <= end) {
550                         start = state->end + 1;
551                         set |= clear_state_bit(tree, state, bits,
552                                         wake, delete);
553                 } else {
554                         start = state->start;
555                 }
556                 goto search_again;
557         }
558         /*
559          * | ---- desired range ---- |
560          *                        | state |
561          * We need to split the extent, and clear the bit
562          * on the first half
563          */
564         if (state->start <= end && state->end > end) {
565                 err = split_state(tree, state, prealloc, end + 1);
566                 BUG_ON(err == -EEXIST);
567
568                 if (wake)
569                         wake_up(&state->wq);
570                 set |= clear_state_bit(tree, prealloc, bits,
571                                        wake, delete);
572                 prealloc = NULL;
573                 goto out;
574         }
575
576         start = state->end + 1;
577         set |= clear_state_bit(tree, state, bits, wake, delete);
578         goto search_again;
579
580 out:
581         write_unlock_irqrestore(&tree->lock, flags);
582         if (prealloc)
583                 free_extent_state(prealloc);
584
585         return set;
586
587 search_again:
588         if (start > end)
589                 goto out;
590         write_unlock_irqrestore(&tree->lock, flags);
591         if (mask & __GFP_WAIT)
592                 cond_resched();
593         goto again;
594 }
595 EXPORT_SYMBOL(clear_extent_bit);
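
/*
 * Editorial sketch, not part of the original file: a truncate-style use of
 * clear_extent_bit() as described above -- wake sleepers, delete the state
 * records outright, and use the return value to learn whether any of the
 * bits had been set.  The helper name and bit combination are illustrative.
 */
static int example_truncate_range(struct extent_map_tree *tree,
                                  u64 start, u64 end)
{
        int set;

        set = clear_extent_bit(tree, start, end,
                               EXTENT_DIRTY | EXTENT_DELALLOC,
                               1, 1, GFP_NOFS);
        if (set < 0)
                return set;     /* allocation failure */
        return set > 0;         /* 1 if anything was dirty or delalloc */
}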
596
597 static int wait_on_state(struct extent_map_tree *tree,
598                          struct extent_state *state)
599 {
600         DEFINE_WAIT(wait);
601         prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
602         read_unlock_irq(&tree->lock);
603         schedule();
604         read_lock_irq(&tree->lock);
605         finish_wait(&state->wq, &wait);
606         return 0;
607 }
608
609 /*
610  * waits for one or more bits to clear on a range in the state tree.
611  * The range [start, end] is inclusive.
612  * The tree lock is taken by this function
613  */
614 int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
615 {
616         struct extent_state *state;
617         struct rb_node *node;
618
619         read_lock_irq(&tree->lock);
620 again:
621         while (1) {
622                 /*
623                  * this search will find all the extents that end after
624                  * our range starts
625                  */
626                 node = tree_search(&tree->state, start);
627                 if (!node)
628                         break;
629
630                 state = rb_entry(node, struct extent_state, rb_node);
631
632                 if (state->start > end)
633                         goto out;
634
635                 if (state->state & bits) {
636                         start = state->start;
637                         atomic_inc(&state->refs);
638                         wait_on_state(tree, state);
639                         free_extent_state(state);
640                         goto again;
641                 }
642                 start = state->end + 1;
643
644                 if (start > end)
645                         break;
646
647                 if (need_resched()) {
648                         read_unlock_irq(&tree->lock);
649                         cond_resched();
650                         read_lock_irq(&tree->lock);
651                 }
652         }
653 out:
654         read_unlock_irq(&tree->lock);
655         return 0;
656 }
657 EXPORT_SYMBOL(wait_extent_bit);
658
659 /*
660  * set some bits on a range in the tree.  This may require allocations
661  * or sleeping, so the gfp mask is used to indicate what is allowed.
662  *
663  * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
664  * range already has the desired bits set.  The start of the existing
665  * range is returned in failed_start in this case.
666  *
667  * [start, end] is inclusive
668  * This takes the tree lock.
669  */
670 int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
671                    int exclusive, u64 *failed_start, gfp_t mask)
672 {
673         struct extent_state *state;
674         struct extent_state *prealloc = NULL;
675         struct rb_node *node;
676         unsigned long flags;
677         int err = 0;
678         int set;
679         u64 last_start;
680         u64 last_end;
681 again:
682         if (!prealloc && (mask & __GFP_WAIT)) {
683                 prealloc = alloc_extent_state(mask);
684                 if (!prealloc)
685                         return -ENOMEM;
686         }
687
688         write_lock_irqsave(&tree->lock, flags);
689         /*
690          * this search will find all the extents that end after
691          * our range starts.
692          */
693         node = tree_search(&tree->state, start);
694         if (!node) {
695                 err = insert_state(tree, prealloc, start, end, bits);
696                 prealloc = NULL;
697                 BUG_ON(err == -EEXIST);
698                 goto out;
699         }
700
701         state = rb_entry(node, struct extent_state, rb_node);
702         last_start = state->start;
703         last_end = state->end;
704
705         /*
706          * | ---- desired range ---- |
707          * | state |
708          *
709          * Just lock what we found and keep going
710          */
711         if (state->start == start && state->end <= end) {
712                 set = state->state & bits;
713                 if (set && exclusive) {
714                         *failed_start = state->start;
715                         err = -EEXIST;
716                         goto out;
717                 }
718                 state->state |= bits;
719                 start = state->end + 1;
720                 merge_state(tree, state);
721                 goto search_again;
722         }
723
724         /*
725          *     | ---- desired range ---- |
726          * | state |
727          *   or
728          * | ------------- state -------------- |
729          *
730          * We need to split the extent we found, and may flip bits on
731          * second half.
732          *
733          * If the extent we found extends past our
734          * range, we just split and search again.  It'll get split
735          * again the next time though.
736          *
737          * If the extent we found is inside our range, we set the
738          * desired bit on it.
739          */
740         if (state->start < start) {
741                 set = state->state & bits;
742                 if (exclusive && set) {
743                         *failed_start = start;
744                         err = -EEXIST;
745                         goto out;
746                 }
747                 err = split_state(tree, state, prealloc, start);
748                 BUG_ON(err == -EEXIST);
749                 prealloc = NULL;
750                 if (err)
751                         goto out;
752                 if (state->end <= end) {
753                         state->state |= bits;
754                         start = state->end + 1;
755                         merge_state(tree, state);
756                 } else {
757                         start = state->start;
758                 }
759                 goto search_again;
760         }
761         /*
762          * | ---- desired range ---- |
763          *     | state | or               | state |
764          *
765          * There's a hole, we need to insert something in it and
766          * ignore the extent we found.
767          */
768         if (state->start > start) {
769                 u64 this_end;
770                 if (end < last_start)
771                         this_end = end;
772                 else
773                         this_end = last_start -1;
774                 err = insert_state(tree, prealloc, start, this_end,
775                                    bits);
776                 prealloc = NULL;
777                 BUG_ON(err == -EEXIST);
778                 if (err)
779                         goto out;
780                 start = this_end + 1;
781                 goto search_again;
782         }
783         /*
784          * | ---- desired range ---- |
785          *                        | state |
786          * We need to split the extent, and set the bit
787          * on the first half
788          */
789         if (state->start <= end && state->end > end) {
790                 set = state->state & bits;
791                 if (exclusive && set) {
792                         *failed_start = start;
793                         err = -EEXIST;
794                         goto out;
795                 }
796                 err = split_state(tree, state, prealloc, end + 1);
797                 BUG_ON(err == -EEXIST);
798
799                 prealloc->state |= bits;
800                 merge_state(tree, prealloc);
801                 prealloc = NULL;
802                 goto out;
803         }
804
805         goto search_again;
806
807 out:
808         write_unlock_irqrestore(&tree->lock, flags);
809         if (prealloc)
810                 free_extent_state(prealloc);
811
812         return err;
813
814 search_again:
815         if (start > end)
816                 goto out;
817         write_unlock_irqrestore(&tree->lock, flags);
818         if (mask & __GFP_WAIT)
819                 cond_resched();
820         goto again;
821 }
822 EXPORT_SYMBOL(set_extent_bit);
823
824 /* wrappers around set/clear extent bit */
825 int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
826                      gfp_t mask)
827 {
828         return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
829                               mask);
830 }
831 EXPORT_SYMBOL(set_extent_dirty);
832
833 int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
834                     int bits, gfp_t mask)
835 {
836         return set_extent_bit(tree, start, end, bits, 0, NULL,
837                               mask);
838 }
839 EXPORT_SYMBOL(set_extent_bits);
840
841 int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
842                       int bits, gfp_t mask)
843 {
844         return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
845 }
846 EXPORT_SYMBOL(clear_extent_bits);
847
848 int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
849                      gfp_t mask)
850 {
851         return set_extent_bit(tree, start, end,
852                               EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
853                               mask);
854 }
855 EXPORT_SYMBOL(set_extent_delalloc);
856
857 int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
858                        gfp_t mask)
859 {
860         return clear_extent_bit(tree, start, end,
861                                 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
862 }
863 EXPORT_SYMBOL(clear_extent_dirty);
864
865 int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
866                      gfp_t mask)
867 {
868         return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
869                               mask);
870 }
871 EXPORT_SYMBOL(set_extent_new);
872
873 int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
874                        gfp_t mask)
875 {
876         return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
877 }
878 EXPORT_SYMBOL(clear_extent_new);
879
880 int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
881                         gfp_t mask)
882 {
883         return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
884                               mask);
885 }
886 EXPORT_SYMBOL(set_extent_uptodate);
887
888 int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
889                           gfp_t mask)
890 {
891         return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
892 }
893 EXPORT_SYMBOL(clear_extent_uptodate);
894
895 int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
896                          gfp_t mask)
897 {
898         return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
899                               0, NULL, mask);
900 }
901 EXPORT_SYMBOL(set_extent_writeback);
902
903 int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
904                            gfp_t mask)
905 {
906         return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
907 }
908 EXPORT_SYMBOL(clear_extent_writeback);
909
910 int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
911 {
912         return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
913 }
914 EXPORT_SYMBOL(wait_on_extent_writeback);
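
/*
 * Editorial sketch, not part of the original file: how the wrappers above
 * combine over the life of a range under IO -- flip it from dirty to
 * writeback when IO is started, and clear writeback (which also wakes any
 * waiters) when IO completes, so that wait_on_extent_writeback() callers
 * make progress.  The helper names are hypothetical and the actual IO
 * submission is omitted.
 */
static void example_start_io_on_range(struct extent_map_tree *tree,
                                      u64 start, u64 end)
{
        clear_extent_dirty(tree, start, end, GFP_NOFS);
        set_extent_writeback(tree, start, end, GFP_NOFS);
        /* ... submit the actual IO for [start, end] here ... */
}

static void example_finish_io_on_range(struct extent_map_tree *tree,
                                       u64 start, u64 end)
{
        /* clears EXTENT_WRITEBACK and wakes anyone in wait_extent_bit() */
        clear_extent_writeback(tree, start, end, GFP_ATOMIC);
}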
915
916 /*
917  * locks a range in ascending order, waiting for any locked regions
918  * it hits on the way.  [start,end] are inclusive, and this will sleep.
919  */
920 int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
921 {
922         int err;
923         u64 failed_start;
924         while (1) {
925                 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
926                                      &failed_start, mask);
927                 if (err == -EEXIST && (mask & __GFP_WAIT)) {
928                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
929                         start = failed_start;
930                 } else {
931                         break;
932                 }
933                 WARN_ON(start > end);
934         }
935         return err;
936 }
937 EXPORT_SYMBOL(lock_extent);
938
939 int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
940                   gfp_t mask)
941 {
942         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
943 }
944 EXPORT_SYMBOL(unlock_extent);
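
/*
 * Editorial sketch, not part of the original file: the usual pattern around
 * lock_extent()/unlock_extent() -- take the range lock, operate on the
 * tree, drop the lock.  lock_extent() sleeps on any already-locked region
 * it runs into, as described above.  The helper name and the placeholder
 * work are hypothetical.
 */
static void example_with_range_locked(struct extent_map_tree *tree,
                                      u64 start, u64 end)
{
        lock_extent(tree, start, end, GFP_NOFS);

        /* ... inspect or update state covering [start, end] here ... */

        unlock_extent(tree, start, end, GFP_NOFS);
}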
945
946 /*
947  * helper function to set pages and extents in the tree dirty
948  */
949 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
950 {
951         unsigned long index = start >> PAGE_CACHE_SHIFT;
952         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
953         struct page *page;
954
955         while (index <= end_index) {
956                 page = find_get_page(tree->mapping, index);
957                 BUG_ON(!page);
958                 __set_page_dirty_nobuffers(page);
959                 page_cache_release(page);
960                 index++;
961         }
962         set_extent_dirty(tree, start, end, GFP_NOFS);
963         return 0;
964 }
965 EXPORT_SYMBOL(set_range_dirty);
966
967 /*
968  * helper function to set both pages and extents in the tree writeback
969  */
970 int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
971 {
972         unsigned long index = start >> PAGE_CACHE_SHIFT;
973         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
974         struct page *page;
975
976         while (index <= end_index) {
977                 page = find_get_page(tree->mapping, index);
978                 BUG_ON(!page);
979                 set_page_writeback(page);
980                 page_cache_release(page);
981                 index++;
982         }
983         set_extent_writeback(tree, start, end, GFP_NOFS);
984         return 0;
985 }
986 EXPORT_SYMBOL(set_range_writeback);
987
988 int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
989                           u64 *start_ret, u64 *end_ret, int bits)
990 {
991         struct rb_node *node;
992         struct extent_state *state;
993         int ret = 1;
994
995         read_lock_irq(&tree->lock);
996         /*
997          * this search will find all the extents that end after
998          * our range starts.
999          */
1000         node = tree_search(&tree->state, start);
1001         if (!node || IS_ERR(node)) {
1002                 goto out;
1003         }
1004
1005         while(1) {
1006                 state = rb_entry(node, struct extent_state, rb_node);
1007                 if (state->end >= start && (state->state & bits)) {
1008                         *start_ret = state->start;
1009                         *end_ret = state->end;
1010                         ret = 0;
1011                         break;
1012                 }
1013                 node = rb_next(node);
1014                 if (!node)
1015                         break;
1016         }
1017 out:
1018         read_unlock_irq(&tree->lock);
1019         return ret;
1020 }
1021 EXPORT_SYMBOL(find_first_extent_bit);
1022
1023 u64 find_lock_delalloc_range(struct extent_map_tree *tree,
1024                              u64 start, u64 lock_start, u64 *end, u64 max_bytes)
1025 {
1026         struct rb_node *node;
1027         struct extent_state *state;
1028         u64 cur_start = start;
1029         u64 found = 0;
1030         u64 total_bytes = 0;
1031
1032         write_lock_irq(&tree->lock);
1033         /*
1034          * this search will find all the extents that end after
1035          * our range starts.
1036          */
1037 search_again:
1038         node = tree_search(&tree->state, cur_start);
1039         if (!node || IS_ERR(node)) {
1040                 goto out;
1041         }
1042
1043         while(1) {
1044                 state = rb_entry(node, struct extent_state, rb_node);
1045                 if (state->start != cur_start) {
1046                         goto out;
1047                 }
1048                 if (!(state->state & EXTENT_DELALLOC)) {
1049                         goto out;
1050                 }
1051                 if (state->start >= lock_start) {
1052                         if (state->state & EXTENT_LOCKED) {
1053                                 DEFINE_WAIT(wait);
1054                                 atomic_inc(&state->refs);
1055                                 prepare_to_wait(&state->wq, &wait,
1056                                                 TASK_UNINTERRUPTIBLE);
1057                                 write_unlock_irq(&tree->lock);
1058                                 schedule();
1059                                 write_lock_irq(&tree->lock);
1060                                 finish_wait(&state->wq, &wait);
1061                                 free_extent_state(state);
1062                                 goto search_again;
1063                         }
1064                         state->state |= EXTENT_LOCKED;
1065                 }
1066                 found++;
1067                 *end = state->end;
1068                 cur_start = state->end + 1;
1069                 node = rb_next(node);
1070                 if (!node)
1071                         break;
1072                 total_bytes += state->end - state->start + 1;
1073                 if (total_bytes >= max_bytes)
1074                         break;
1075         }
1076 out:
1077         write_unlock_irq(&tree->lock);
1078         return found;
1079 }
1080
1081 /*
1082  * helper function to lock both pages and extents in the tree.
1083  * pages must be locked first.
1084  */
1085 int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
1086 {
1087         unsigned long index = start >> PAGE_CACHE_SHIFT;
1088         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1089         struct page *page;
1090         int err;
1091
1092         while (index <= end_index) {
1093                 page = grab_cache_page(tree->mapping, index);
1094                 if (!page) {
1095                         err = -ENOMEM;
1096                         goto failed;
1097                 }
1098                 if (IS_ERR(page)) {
1099                         err = PTR_ERR(page);
1100                         goto failed;
1101                 }
1102                 index++;
1103         }
1104         lock_extent(tree, start, end, GFP_NOFS);
1105         return 0;
1106
1107 failed:
1108         /*
1109          * we failed above in getting the page at 'index', so we undo here
1110          * up to but not including the page at 'index'
1111          */
1112         end_index = index;
1113         index = start >> PAGE_CACHE_SHIFT;
1114         while (index < end_index) {
1115                 page = find_get_page(tree->mapping, index);
1116                 unlock_page(page);
1117                 page_cache_release(page);
1118                 index++;
1119         }
1120         return err;
1121 }
1122 EXPORT_SYMBOL(lock_range);
1123
1124 /*
1125  * helper function to unlock both pages and extents in the tree.
1126  */
1127 int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
1128 {
1129         unsigned long index = start >> PAGE_CACHE_SHIFT;
1130         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1131         struct page *page;
1132
1133         while (index <= end_index) {
1134                 page = find_get_page(tree->mapping, index);
1135                 unlock_page(page);
1136                 page_cache_release(page);
1137                 index++;
1138         }
1139         unlock_extent(tree, start, end, GFP_NOFS);
1140         return 0;
1141 }
1142 EXPORT_SYMBOL(unlock_range);
1143
1144 int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
1145 {
1146         struct rb_node *node;
1147         struct extent_state *state;
1148         int ret = 0;
1149
1150         write_lock_irq(&tree->lock);
1151         /*
1152          * this search will find all the extents that end after
1153          * our range starts.
1154          */
1155         node = tree_search(&tree->state, start);
1156         if (!node || IS_ERR(node)) {
1157                 ret = -ENOENT;
1158                 goto out;
1159         }
1160         state = rb_entry(node, struct extent_state, rb_node);
1161         if (state->start != start) {
1162                 ret = -ENOENT;
1163                 goto out;
1164         }
1165         state->private = private;
1166 out:
1167         write_unlock_irq(&tree->lock);
1168         return ret;
1169 }
1170
1171 int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
1172 {
1173         struct rb_node *node;
1174         struct extent_state *state;
1175         int ret = 0;
1176
1177         read_lock_irq(&tree->lock);
1178         /*
1179          * this search will find all the extents that end after
1180          * our range starts.
1181          */
1182         node = tree_search(&tree->state, start);
1183         if (!node || IS_ERR(node)) {
1184                 ret = -ENOENT;
1185                 goto out;
1186         }
1187         state = rb_entry(node, struct extent_state, rb_node);
1188         if (state->start != start) {
1189                 ret = -ENOENT;
1190                 goto out;
1191         }
1192         *private = state->private;
1193 out:
1194         read_unlock_irq(&tree->lock);
1195         return ret;
1196 }
1197
1198 /*
1199  * searches a range in the state tree for a given mask.
1200  * If 'filled' == 1, this returns 1 only if every extent in the range
1201  * has the bits set.  Otherwise, 1 is returned if any bit in the
1202  * range is found set.
1203  */
1204 int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
1205                    int bits, int filled)
1206 {
1207         struct extent_state *state = NULL;
1208         struct rb_node *node;
1209         int bitset = 0;
1210
1211         read_lock_irq(&tree->lock);
1212         node = tree_search(&tree->state, start);
1213         while (node && start <= end) {
1214                 state = rb_entry(node, struct extent_state, rb_node);
1215                 if (state->start > end)
1216                         break;
1217
1218                 if (filled && state->start > start) {
1219                         bitset = 0;
1220                         break;
1221                 }
1222                 if (state->state & bits) {
1223                         bitset = 1;
1224                         if (!filled)
1225                                 break;
1226                 } else if (filled) {
1227                         bitset = 0;
1228                         break;
1229                 }
1230                 start = state->end + 1;
1231                 if (start > end)
1232                         break;
1233                 node = rb_next(node);
1234         }
1235         read_unlock_irq(&tree->lock);
1236         return bitset;
1237 }
1238 EXPORT_SYMBOL(test_range_bit);
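
/*
 * Editorial sketch, not part of the original file: what the 'filled'
 * argument changes about test_range_bit(), mirroring how the page helpers
 * below use it.  The helper name is hypothetical.
 */
static void example_test_range(struct extent_map_tree *tree, u64 start,
                               u64 end)
{
        /* 1 only if every part of [start, end] has uptodate state */
        int all_uptodate = test_range_bit(tree, start, end,
                                          EXTENT_UPTODATE, 1);

        /* 1 if any part of [start, end] is still locked */
        int any_locked = test_range_bit(tree, start, end,
                                        EXTENT_LOCKED, 0);

        if (all_uptodate && !any_locked) {
                /* the whole range is read in and no one holds it locked */
        }
}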
1239
1240 /*
1241  * helper function to set a given page up to date if all the
1242  * extents in the tree for that page are up to date
1243  */
1244 static int check_page_uptodate(struct extent_map_tree *tree,
1245                                struct page *page)
1246 {
1247         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1248         u64 end = start + PAGE_CACHE_SIZE - 1;
1249         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1250                 SetPageUptodate(page);
1251         return 0;
1252 }
1253
1254 /*
1255  * helper function to unlock a page if all the extents in the tree
1256  * for that page are unlocked
1257  */
1258 static int check_page_locked(struct extent_map_tree *tree,
1259                              struct page *page)
1260 {
1261         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1262         u64 end = start + PAGE_CACHE_SIZE - 1;
1263         if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1264                 unlock_page(page);
1265         return 0;
1266 }
1267
1268 /*
1269  * helper function to end page writeback if all the extents
1270  * in the tree for that page are done with writeback
1271  */
1272 static int check_page_writeback(struct extent_map_tree *tree,
1273                              struct page *page)
1274 {
1275         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1276         u64 end = start + PAGE_CACHE_SIZE - 1;
1277         if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1278                 end_page_writeback(page);
1279         return 0;
1280 }
1281
1282 /* lots and lots of room for performance fixes in the end_bio funcs */
1283
1284 /*
1285  * after a writepage IO is done, we need to:
1286  * clear the uptodate bits on error
1287  * clear the writeback bits in the extent tree for this IO
1288  * end_page_writeback if the page has no more pending IO
1289  *
1290  * Scheduling is not allowed, so the extent state tree is expected
1291  * to have one and only one object corresponding to this IO.
1292  */
1293 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1294 static void end_bio_extent_writepage(struct bio *bio, int err)
1295 #else
1296 static int end_bio_extent_writepage(struct bio *bio,
1297                                    unsigned int bytes_done, int err)
1298 #endif
1299 {
1300         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1301         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1302         struct extent_map_tree *tree = bio->bi_private;
1303         u64 start;
1304         u64 end;
1305         int whole_page;
1306
1307 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1308         if (bio->bi_size)
1309                 return 1;
1310 #endif
1311
1312         do {
1313                 struct page *page = bvec->bv_page;
1314                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1315                          bvec->bv_offset;
1316                 end = start + bvec->bv_len - 1;
1317
1318                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1319                         whole_page = 1;
1320                 else
1321                         whole_page = 0;
1322
1323                 if (--bvec >= bio->bi_io_vec)
1324                         prefetchw(&bvec->bv_page->flags);
1325
1326                 if (!uptodate) {
1327                         clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1328                         ClearPageUptodate(page);
1329                         SetPageError(page);
1330                 }
1331                 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1332
1333                 if (whole_page)
1334                         end_page_writeback(page);
1335                 else
1336                         check_page_writeback(tree, page);
1337                 if (tree->ops && tree->ops->writepage_end_io_hook)
1338                         tree->ops->writepage_end_io_hook(page, start, end);
1339         } while (bvec >= bio->bi_io_vec);
1340
1341         bio_put(bio);
1342 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1343         return 0;
1344 #endif
1345 }
1346
1347 /*
1348  * after a readpage IO is done, we need to:
1349  * clear the uptodate bits on error
1350  * set the uptodate bits if things worked
1351  * set the page up to date if all extents in the tree are uptodate
1352  * clear the lock bit in the extent tree
1353  * unlock the page if there are no other extents locked for it
1354  *
1355  * Scheduling is not allowed, so the extent state tree is expected
1356  * to have one and only one object corresponding to this IO.
1357  */
1358 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1359 static void end_bio_extent_readpage(struct bio *bio, int err)
1360 #else
1361 static int end_bio_extent_readpage(struct bio *bio,
1362                                    unsigned int bytes_done, int err)
1363 #endif
1364 {
1365         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1366         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1367         struct extent_map_tree *tree = bio->bi_private;
1368         u64 start;
1369         u64 end;
1370         int whole_page;
1371         int ret;
1372
1373 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1374         if (bio->bi_size)
1375                 return 1;
1376 #endif
1377
1378         do {
1379                 struct page *page = bvec->bv_page;
1380                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1381                         bvec->bv_offset;
1382                 end = start + bvec->bv_len - 1;
1383
1384                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1385                         whole_page = 1;
1386                 else
1387                         whole_page = 0;
1388
1389                 if (--bvec >= bio->bi_io_vec)
1390                         prefetchw(&bvec->bv_page->flags);
1391
1392                 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1393                         ret = tree->ops->readpage_end_io_hook(page, start, end);
1394                         if (ret)
1395                                 uptodate = 0;
1396                 }
1397                 if (uptodate) {
1398                         set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1399                         if (whole_page)
1400                                 SetPageUptodate(page);
1401                         else
1402                                 check_page_uptodate(tree, page);
1403                 } else {
1404                         ClearPageUptodate(page);
1405                         SetPageError(page);
1406                 }
1407
1408                 unlock_extent(tree, start, end, GFP_ATOMIC);
1409
1410                 if (whole_page)
1411                         unlock_page(page);
1412                 else
1413                         check_page_locked(tree, page);
1414         } while (bvec >= bio->bi_io_vec);
1415
1416         bio_put(bio);
1417 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1418         return 0;
1419 #endif
1420 }
1421
1422 /*
1423  * IO done from prepare_write is pretty simple, we just unlock
1424  * the structs in the extent tree when done, and set the uptodate bits
1425  * as appropriate.
1426  */
1427 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1428 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1429 #else
1430 static int end_bio_extent_preparewrite(struct bio *bio,
1431                                        unsigned int bytes_done, int err)
1432 #endif
1433 {
1434         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1435         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1436         struct extent_map_tree *tree = bio->bi_private;
1437         u64 start;
1438         u64 end;
1439
1440 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1441         if (bio->bi_size)
1442                 return 1;
1443 #endif
1444
1445         do {
1446                 struct page *page = bvec->bv_page;
1447                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1448                         bvec->bv_offset;
1449                 end = start + bvec->bv_len - 1;
1450
1451                 if (--bvec >= bio->bi_io_vec)
1452                         prefetchw(&bvec->bv_page->flags);
1453
1454                 if (uptodate) {
1455                         set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1456                 } else {
1457                         ClearPageUptodate(page);
1458                         SetPageError(page);
1459                 }
1460
1461                 unlock_extent(tree, start, end, GFP_ATOMIC);
1462
1463         } while (bvec >= bio->bi_io_vec);
1464
1465         bio_put(bio);
1466 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1467         return 0;
1468 #endif
1469 }
1470
1471 static struct bio *
1472 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1473                  gfp_t gfp_flags)
1474 {
1475         struct bio *bio;
1476
1477         bio = bio_alloc(gfp_flags, nr_vecs);
1478
1479         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1480                 while (!bio && (nr_vecs /= 2))
1481                         bio = bio_alloc(gfp_flags, nr_vecs);
1482         }
1483
1484         if (bio) {
1485                 bio->bi_bdev = bdev;
1486                 bio->bi_sector = first_sector;
1487         }
1488         return bio;
1489 }
1490
1491 static int submit_one_bio(int rw, struct bio *bio)
1492 {
1493         int ret = 0;
1494         bio_get(bio);
1495         submit_bio(rw, bio);
1496         if (bio_flagged(bio, BIO_EOPNOTSUPP))
1497                 ret = -EOPNOTSUPP;
1498         bio_put(bio);
1499         return ret;
1500 }
1501
1502 static int submit_extent_page(int rw, struct extent_map_tree *tree,
1503                               struct page *page, sector_t sector,
1504                               size_t size, unsigned long offset,
1505                               struct block_device *bdev,
1506                               struct bio **bio_ret,
1507                               unsigned long max_pages,
1508                               bio_end_io_t end_io_func)
1509 {
1510         int ret = 0;
1511         struct bio *bio;
1512         int nr;
1513
1514         if (bio_ret && *bio_ret) {
1515                 bio = *bio_ret;
1516                 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1517                     bio_add_page(bio, page, size, offset) < size) {
1518                         ret = submit_one_bio(rw, bio);
1519                         bio = NULL;
1520                 } else {
1521                         return 0;
1522                 }
1523         }
1524         nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
1525         bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1526         if (!bio) {
1527                 printk("failed to allocate bio nr %d\n", nr);
1528         }
1529         bio_add_page(bio, page, size, offset);
1530         bio->bi_end_io = end_io_func;
1531         bio->bi_private = tree;
1532         if (bio_ret) {
1533                 *bio_ret = bio;
1534         } else {
1535                 ret = submit_one_bio(rw, bio);
1536         }
1537
1538         return ret;
1539 }
1540
1541 void set_page_extent_mapped(struct page *page)
1542 {
1543         if (!PagePrivate(page)) {
1544                 SetPagePrivate(page);
1545                 WARN_ON(!page->mapping->a_ops->invalidatepage);
1546                 set_page_private(page, EXTENT_PAGE_PRIVATE);
1547                 page_cache_get(page);
1548         }
1549 }
1550
1551 /*
1552  * basic readpage implementation.  Locked extent state structs are inserted
1553  * into the tree; they are removed when the IO is done (by the end_io
1554  * handlers)
1555  */
1556 static int __extent_read_full_page(struct extent_map_tree *tree,
1557                                    struct page *page,
1558                                    get_extent_t *get_extent,
1559                                    struct bio **bio)
1560 {
1561         struct inode *inode = page->mapping->host;
1562         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1563         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1564         u64 end;
1565         u64 cur = start;
1566         u64 extent_offset;
1567         u64 last_byte = i_size_read(inode);
1568         u64 block_start;
1569         u64 cur_end;
1570         sector_t sector;
1571         struct extent_map *em;
1572         struct block_device *bdev;
1573         int ret;
1574         int nr = 0;
1575         size_t page_offset = 0;
1576         size_t iosize;
1577         size_t blocksize = inode->i_sb->s_blocksize;
1578
1579         set_page_extent_mapped(page);
1580
1581         end = page_end;
1582         lock_extent(tree, start, end, GFP_NOFS);
1583
1584         while (cur <= end) {
1585                 if (cur >= last_byte) {
1586                         iosize = PAGE_CACHE_SIZE - page_offset;
1587                         zero_user_page(page, page_offset, iosize, KM_USER0);
1588                         set_extent_uptodate(tree, cur, cur + iosize - 1,
1589                                             GFP_NOFS);
1590                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1591                         break;
1592                 }
1593                 em = get_extent(inode, page, page_offset, cur, end, 0);
1594                 if (IS_ERR(em) || !em) {
1595                         SetPageError(page);
1596                         unlock_extent(tree, cur, end, GFP_NOFS);
1597                         break;
1598                 }
1599
1600                 extent_offset = cur - em->start;
1601                 BUG_ON(em->end < cur);
1602                 BUG_ON(end < cur);
1603
1604                 iosize = min(em->end - cur, end - cur) + 1;
1605                 cur_end = min(em->end, end);
1606                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1607                 sector = (em->block_start + extent_offset) >> 9;
1608                 bdev = em->bdev;
1609                 block_start = em->block_start;
1610                 free_extent_map(em);
1611                 em = NULL;
1612
1613                 /* we've found a hole, just zero and go on */
1614                 if (block_start == EXTENT_MAP_HOLE) {
1615                         zero_user_page(page, page_offset, iosize, KM_USER0);
1616                         set_extent_uptodate(tree, cur, cur + iosize - 1,
1617                                             GFP_NOFS);
1618                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1619                         cur = cur + iosize;
1620                         page_offset += iosize;
1621                         continue;
1622                 }
1623                 /* the get_extent function already copied into the page */
1624                 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1625                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1626                         cur = cur + iosize;
1627                         page_offset += iosize;
1628                         continue;
1629                 }
1630
1631                 ret = 0;
1632                 if (tree->ops && tree->ops->readpage_io_hook) {
1633                         ret = tree->ops->readpage_io_hook(page, cur,
1634                                                           cur + iosize - 1);
1635                 }
1636                 if (!ret) {
1637                         unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1638                         nr -= page->index;
1639                         ret = submit_extent_page(READ, tree, page,
1640                                          sector, iosize, page_offset,
1641                                          bdev, bio, nr,
1642                                          end_bio_extent_readpage);
1643                 }
1644                 if (ret)
1645                         SetPageError(page);
1646                 cur = cur + iosize;
1647                 page_offset += iosize;
1648                 nr++;
1649         }
1650         if (!nr) {
1651                 if (!PageError(page))
1652                         SetPageUptodate(page);
1653                 unlock_page(page);
1654         }
1655         return 0;
1656 }
1657
1658 int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1659                             get_extent_t *get_extent)
1660 {
1661         struct bio *bio = NULL;
1662         int ret;
1663
1664         ret = __extent_read_full_page(tree, page, get_extent, &bio);
1665         if (bio)
1666                 submit_one_bio(READ, bio);
1667         return ret;
1668 }
1669 EXPORT_SYMBOL(extent_read_full_page);
1670
1671 /*
1672  * the writepage semantics are similar to regular writepage.  extent
1673  * records are inserted to lock ranges in the tree, and as dirty areas
1674  * are found, they are marked writeback.  Then the lock bits are removed
1675  * and the end_io handler clears the writeback ranges
1676  */
1677 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1678                               void *data)
1679 {
1680         struct inode *inode = page->mapping->host;
1681         struct extent_page_data *epd = data;
1682         struct extent_map_tree *tree = epd->tree;
1683         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1684         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1685         u64 end;
1686         u64 cur = start;
1687         u64 extent_offset;
1688         u64 last_byte = i_size_read(inode);
1689         u64 block_start;
1690         u64 iosize;
1691         sector_t sector;
1692         struct extent_map *em;
1693         struct block_device *bdev;
1694         int ret;
1695         int nr = 0;
1696         size_t page_offset = 0;
1697         size_t blocksize;
1698         loff_t i_size = i_size_read(inode);
1699         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1700         u64 nr_delalloc;
1701         u64 delalloc_end;
1702
1703         WARN_ON(!PageLocked(page));
1704         if (page->index > end_index) {
1705                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1706                 unlock_page(page);
1707                 return 0;
1708         }
1709
1710         if (page->index == end_index) {
1711                 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1712                 zero_user_page(page, offset,
1713                                PAGE_CACHE_SIZE - offset, KM_USER0);
1714         }
1715
1716         set_page_extent_mapped(page);
1717
1718         lock_extent(tree, start, page_end, GFP_NOFS);
1719         nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
1720                                                &delalloc_end,
1721                                                128 * 1024 * 1024);
1722         if (nr_delalloc) {
1723                 tree->ops->fill_delalloc(inode, start, delalloc_end);
1724                 if (delalloc_end >= page_end + 1) {
1725                         clear_extent_bit(tree, page_end + 1, delalloc_end,
1726                                          EXTENT_LOCKED | EXTENT_DELALLOC,
1727                                          1, 0, GFP_NOFS);
1728                 }
1729                 clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
1730                                  0, 0, GFP_NOFS);
1731                 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1732                         printk("found delalloc bits after clear extent_bit\n");
1733                 }
1734         } else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1735                 printk("found delalloc bits after find_delalloc_range returns 0\n");
1736         }
1737
1738         end = page_end;
1739         if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1740                 printk("found delalloc bits after lock_extent\n");
1741         }
1742
1743         if (last_byte <= start) {
1744                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1745                 goto done;
1746         }
1747
1748         set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1749         blocksize = inode->i_sb->s_blocksize;
1750
1751         while (cur <= end) {
1752                 if (cur >= last_byte) {
1753                         clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1754                         break;
1755                 }
1756                 em = epd->get_extent(inode, page, page_offset, cur, end, 1);
1757                 if (IS_ERR(em) || !em) {
1758                         SetPageError(page);
1759                         break;
1760                 }
1761
1762                 extent_offset = cur - em->start;
1763                 BUG_ON(em->end < cur);
1764                 BUG_ON(end < cur);
1765                 iosize = min(em->end - cur, end - cur) + 1;
1766                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1767                 sector = (em->block_start + extent_offset) >> 9;
1768                 bdev = em->bdev;
1769                 block_start = em->block_start;
1770                 free_extent_map(em);
1771                 em = NULL;
1772
1773                 if (block_start == EXTENT_MAP_HOLE ||
1774                     block_start == EXTENT_MAP_INLINE) {
1775                         clear_extent_dirty(tree, cur,
1776                                            cur + iosize - 1, GFP_NOFS);
1777                         cur = cur + iosize;
1778                         page_offset += iosize;
1779                         continue;
1780                 }
1781
1782                 /* leave this out until we have a page_mkwrite call */
1783                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1784                                    EXTENT_DIRTY, 0)) {
1785                         cur = cur + iosize;
1786                         page_offset += iosize;
1787                         continue;
1788                 }
1789                 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1790                 if (tree->ops && tree->ops->writepage_io_hook) {
1791                         ret = tree->ops->writepage_io_hook(page, cur,
1792                                                 cur + iosize - 1);
1793                 } else {
1794                         ret = 0;
1795                 }
1796                 if (ret)
1797                         SetPageError(page);
1798                 else {
1799                         unsigned long nr = end_index + 1;
1800                         set_range_writeback(tree, cur, cur + iosize - 1);
1801
1802                         ret = submit_extent_page(WRITE, tree, page, sector,
1803                                                  iosize, page_offset, bdev,
1804                                                  &epd->bio, nr,
1805                                                  end_bio_extent_writepage);
1806                         if (ret)
1807                                 SetPageError(page);
1808                 }
1809                 cur = cur + iosize;
1810                 page_offset += iosize;
1811                 nr++;
1812         }
1813 done:
1814         unlock_extent(tree, start, page_end, GFP_NOFS);
1815         unlock_page(page);
1816         return 0;
1817 }
1818
1819 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
1820                           get_extent_t *get_extent,
1821                           struct writeback_control *wbc)
1822 {
1823         int ret;
1824         struct extent_page_data epd = {
1825                 .bio = NULL,
1826                 .tree = tree,
1827                 .get_extent = get_extent,
1828         };
1829
1830         ret = __extent_writepage(page, wbc, &epd);
1831         if (epd.bio)
1832                 submit_one_bio(WRITE, epd.bio);
1833         return ret;
1834 }
1835 EXPORT_SYMBOL(extent_write_full_page);
1836
1837 int extent_writepages(struct extent_map_tree *tree,
1838                       struct address_space *mapping,
1839                       get_extent_t *get_extent,
1840                       struct writeback_control *wbc)
1841 {
1842         int ret;
1843         struct extent_page_data epd = {
1844                 .bio = NULL,
1845                 .tree = tree,
1846                 .get_extent = get_extent,
1847         };
1848
1849         ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
1850         if (epd.bio)
1851                 submit_one_bio(WRITE, epd.bio);
1852         return ret;
1853 }
1854 EXPORT_SYMBOL(extent_writepages);
1855
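/*
 * readpages helper: pull each page off the list, add it to the page
 * cache and LRU, and read it via __extent_read_full_page.  Reads are
 * batched into a single bio where possible and submitted at the end.
 */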
1856 int extent_readpages(struct extent_map_tree *tree,
1857                      struct address_space *mapping,
1858                      struct list_head *pages, unsigned nr_pages,
1859                      get_extent_t get_extent)
1860 {
1861         struct bio *bio = NULL;
1862         unsigned page_idx;
1863         struct pagevec pvec;
1864
1865         pagevec_init(&pvec, 0);
1866         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
1867                 struct page *page = list_entry(pages->prev, struct page, lru);
1868
1869                 prefetchw(&page->flags);
1870                 list_del(&page->lru);
1871                 /*
1872                  * what we want to do here is call add_to_page_cache_lru,
1873                  * but that isn't exported, so we reproduce it here
1874                  */
1875                 if (!add_to_page_cache(page, mapping,
1876                                         page->index, GFP_KERNEL)) {
1877
1878                         /* open coding of lru_cache_add, also not exported */
1879                         page_cache_get(page);
1880                         if (!pagevec_add(&pvec, page))
1881                                 __pagevec_lru_add(&pvec);
1882                         __extent_read_full_page(tree, page, get_extent, &bio);
1883                 }
1884                 page_cache_release(page);
1885         }
1886         if (pagevec_count(&pvec))
1887                 __pagevec_lru_add(&pvec);
1888         BUG_ON(!list_empty(pages));
1889         if (bio)
1890                 submit_one_bio(READ, bio);
1891         return 0;
1892 }
1893 EXPORT_SYMBOL(extent_readpages);
1894
1895 /*
1896  * basic invalidatepage code, this waits on any locked or writeback
1897  * ranges corresponding to the page, and then deletes any extent state
1898  * records from the tree
1899  */
1900 int extent_invalidatepage(struct extent_map_tree *tree,
1901                           struct page *page, unsigned long offset)
1902 {
1903         u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
1904         u64 end = start + PAGE_CACHE_SIZE - 1;
1905         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
1906
1907         start += (offset + blocksize - 1) & ~(blocksize - 1);
1908         if (start > end)
1909                 return 0;
1910
1911         lock_extent(tree, start, end, GFP_NOFS);
1912         wait_on_extent_writeback(tree, start, end);
1913         clear_extent_bit(tree, start, end,
1914                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
1915                          1, 1, GFP_NOFS);
1916         return 0;
1917 }
1918 EXPORT_SYMBOL(extent_invalidatepage);
1919
1920 /*
1921  * simple commit_write call; the page is marked dirty and i_size is
1922  * updated if the write extends the file
1923  */
1924 int extent_commit_write(struct extent_map_tree *tree,
1925                         struct inode *inode, struct page *page,
1926                         unsigned from, unsigned to)
1927 {
1928         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1929
1930         set_page_extent_mapped(page);
1931         set_page_dirty(page);
1932
1933         if (pos > inode->i_size) {
1934                 i_size_write(inode, pos);
1935                 mark_inode_dirty(inode);
1936         }
1937         return 0;
1938 }
1939 EXPORT_SYMBOL(extent_commit_write);
1940
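/*
 * prepare_write helper.  The extent range for the page is locked,
 * newly allocated blocks are zeroed outside of [from, to), and blocks
 * that are only partially covered by the write and not yet up to date
 * are read in.  Reads started here are waited on before returning.
 */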
1941 int extent_prepare_write(struct extent_map_tree *tree,
1942                          struct inode *inode, struct page *page,
1943                          unsigned from, unsigned to, get_extent_t *get_extent)
1944 {
1945         u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1946         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
1947         u64 block_start;
1948         u64 orig_block_start;
1949         u64 block_end;
1950         u64 cur_end;
1951         struct extent_map *em;
1952         unsigned blocksize = 1 << inode->i_blkbits;
1953         size_t page_offset = 0;
1954         size_t block_off_start;
1955         size_t block_off_end;
1956         int err = 0;
1957         int iocount = 0;
1958         int ret = 0;
1959         int isnew;
1960
1961         set_page_extent_mapped(page);
1962
1963         block_start = (page_start + from) & ~((u64)blocksize - 1);
1964         block_end = (page_start + to - 1) | (blocksize - 1);
1965         orig_block_start = block_start;
1966
1967         lock_extent(tree, page_start, page_end, GFP_NOFS);
1968         while(block_start <= block_end) {
1969                 em = get_extent(inode, page, page_offset, block_start,
1970                                 block_end, 1);
1971                 if (IS_ERR(em) || !em) {
                             err = -EIO;
1972                         goto err;
1973                 }
1974                 cur_end = min(block_end, em->end);
1975                 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
1976                 block_off_end = block_off_start + blocksize;
1977                 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
1978
1979                 if (!PageUptodate(page) && isnew &&
1980                     (block_off_end > to || block_off_start < from)) {
1981                         void *kaddr;
1982
1983                         kaddr = kmap_atomic(page, KM_USER0);
1984                         if (block_off_end > to)
1985                                 memset(kaddr + to, 0, block_off_end - to);
1986                         if (block_off_start < from)
1987                                 memset(kaddr + block_off_start, 0,
1988                                        from - block_off_start);
1989                         flush_dcache_page(page);
1990                         kunmap_atomic(kaddr, KM_USER0);
1991                 }
1992                 if (!isnew && !PageUptodate(page) &&
1993                     (block_off_end > to || block_off_start < from) &&
1994                     !test_range_bit(tree, block_start, cur_end,
1995                                     EXTENT_UPTODATE, 1)) {
1996                         u64 sector;
1997                         u64 extent_offset = block_start - em->start;
1998                         size_t iosize;
1999                         sector = (em->block_start + extent_offset) >> 9;
2000                         iosize = (cur_end - block_start + blocksize - 1) &
2001                                 ~((u64)blocksize - 1);
2002                         /*
2003                          * we've already got the extent locked, but we
2004                          * need to split the state such that our end_bio
2005                          * handler can clear the lock.
2006                          */
2007                         set_extent_bit(tree, block_start,
2008                                        block_start + iosize - 1,
2009                                        EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2010                         ret = submit_extent_page(READ, tree, page,
2011                                          sector, iosize, page_offset, em->bdev,
2012                                          NULL, 1,
2013                                          end_bio_extent_preparewrite);
2014                         iocount++;
2015                         block_start = block_start + iosize;
2016                 } else {
2017                         set_extent_uptodate(tree, block_start, cur_end,
2018                                             GFP_NOFS);
2019                         unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2020                         block_start = cur_end + 1;
2021                 }
2022                 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2023                 free_extent_map(em);
2024         }
2025         if (iocount) {
2026                 wait_extent_bit(tree, orig_block_start,
2027                                 block_end, EXTENT_LOCKED);
2028         }
2029         check_page_uptodate(tree, page);
2030 err:
2031         /* FIXME, zero out newly allocated blocks on error */
2032         return err;
2033 }
2034 EXPORT_SYMBOL(extent_prepare_write);
2035
2036 /*
2037  * a helper for releasepage.  As long as there are no locked extents
2038  * in the range corresponding to the page, both state records and extent
2039  * map records are removed
2040  */
2041 int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
2042 {
2043         struct extent_map *em;
2044         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2045         u64 end = start + PAGE_CACHE_SIZE - 1;
2046         u64 orig_start = start;
2047         int ret = 1;
2048
2049         while (start <= end) {
2050                 em = lookup_extent_mapping(tree, start, end);
2051                 if (!em || IS_ERR(em))
2052                         break;
2053                 if (!test_range_bit(tree, em->start, em->end,
2054                                     EXTENT_LOCKED, 0)) {
2055                         remove_extent_mapping(tree, em);
2056                         /* once for the rb tree */
2057                         free_extent_map(em);
2058                 }
2059                 start = em->end + 1;
2060                 /* once for us */
2061                 free_extent_map(em);
2062         }
2063         if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
2064                 ret = 0;
2065         else
2066                 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
2067                                  1, 1, GFP_NOFS);
2068         return ret;
2069 }
2070 EXPORT_SYMBOL(try_release_extent_mapping);
2071
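/*
 * bmap helper: map a file block to a block number on disk using
 * get_extent.  Holes and inline extents return 0.
 */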
2072 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2073                 get_extent_t *get_extent)
2074 {
2075         struct inode *inode = mapping->host;
2076         u64 start = (u64)iblock << inode->i_blkbits;
2077         u64 end = start + (1 << inode->i_blkbits) - 1;
2078         sector_t sector = 0;
2079         struct extent_map *em;
2080
2081         em = get_extent(inode, NULL, 0, start, end, 0);
2082         if (!em || IS_ERR(em))
2083                 return 0;
2084
2085         if (em->block_start == EXTENT_MAP_INLINE ||
2086             em->block_start == EXTENT_MAP_HOLE)
2087                 goto out;
2088
2089         sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2090 out:
2091         free_extent_map(em);
2092         return sector;
2093 }
2094
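/*
 * small LRU of recently used extent buffers, kept per tree.  Adding a
 * buffer when the LRU is full drops the least recently used entry.
 */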
2095 static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
2096 {
2097         if (list_empty(&eb->lru)) {
2098                 extent_buffer_get(eb);
2099                 list_add(&eb->lru, &tree->buffer_lru);
2100                 tree->lru_size++;
2101                 if (tree->lru_size >= BUFFER_LRU_MAX) {
2102                         struct extent_buffer *rm;
2103                         rm = list_entry(tree->buffer_lru.prev,
2104                                         struct extent_buffer, lru);
2105                         tree->lru_size--;
2106                         list_del_init(&rm->lru);
2107                         free_extent_buffer(rm);
2108                 }
2109         } else
2110                 list_move(&eb->lru, &tree->buffer_lru);
2111         return 0;
2112 }
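
/*
 * scan the LRU for a buffer that exactly matches [start, start + len)
 * and return it with an extra reference, or NULL if there is no match.
 */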
2113 static struct extent_buffer *find_lru(struct extent_map_tree *tree,
2114                                       u64 start, unsigned long len)
2115 {
2116         struct list_head *lru = &tree->buffer_lru;
2117         struct list_head *cur = lru->next;
2118         struct extent_buffer *eb;
2119
2120         if (list_empty(lru))
2121                 return NULL;
2122
2123         do {
2124                 eb = list_entry(cur, struct extent_buffer, lru);
2125                 if (eb->start == start && eb->len == len) {
2126                         extent_buffer_get(eb);
2127                         return eb;
2128                 }
2129                 cur = cur->next;
2130         } while (cur != lru);
2131         return NULL;
2132 }
2133
2134 static inline unsigned long num_extent_pages(u64 start, u64 len)
2135 {
2136         return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2137                 (start >> PAGE_CACHE_SHIFT);
2138 }
2139
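/*
 * return page i of an extent buffer.  Page 0 is cached in
 * eb->first_page, the rest are looked up in the page cache radix tree.
 */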
2140 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2141                                               unsigned long i)
2142 {
2143         struct page *p;
2144         struct address_space *mapping;
2145
2146         if (i == 0)
2147                 return eb->first_page;
2148         i += eb->start >> PAGE_CACHE_SHIFT;
2149         mapping = eb->first_page->mapping;
2150         read_lock_irq(&mapping->tree_lock);
2151         p = radix_tree_lookup(&mapping->page_tree, i);
2152         read_unlock_irq(&mapping->tree_lock);
2153         return p;
2154 }
2155
2156 static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2157                                                    u64 start,
2158                                                    unsigned long len,
2159                                                    gfp_t mask)
2160 {
2161         struct extent_buffer *eb = NULL;
2162
2163         spin_lock(&tree->lru_lock);
2164         eb = find_lru(tree, start, len);
2165         spin_unlock(&tree->lru_lock);
2166         if (eb) {
2167                 return eb;
2168         }
2169
2170         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
             if (!eb)
                     return NULL;
2171         INIT_LIST_HEAD(&eb->lru);
2172         eb->start = start;
2173         eb->len = len;
2174         atomic_set(&eb->refs, 1);
2175
2176         return eb;
2177 }
2178
2179 static void __free_extent_buffer(struct extent_buffer *eb)
2180 {
2181         kmem_cache_free(extent_buffer_cache, eb);
2182 }
2183
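/*
 * return the extent buffer for [start, start + len), checking the LRU
 * first and otherwise finding or creating the backing pages in the page
 * cache.  page0 may be passed in by callers that already hold the first
 * page.  Each page gets private data set so the extent code can
 * recognize it later.
 */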
2184 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2185                                           u64 start, unsigned long len,
2186                                           struct page *page0,
2187                                           gfp_t mask)
2188 {
2189         unsigned long num_pages = num_extent_pages(start, len);
2190         unsigned long i;
2191         unsigned long index = start >> PAGE_CACHE_SHIFT;
2192         struct extent_buffer *eb;
2193         struct page *p;
2194         struct address_space *mapping = tree->mapping;
2195         int uptodate = 1;
2196
2197         eb = __alloc_extent_buffer(tree, start, len, mask);
2198         if (!eb || IS_ERR(eb))
2199                 return NULL;
2200
2201         if (eb->flags & EXTENT_BUFFER_FILLED)
2202                 goto lru_add;
2203
2204         if (page0) {
2205                 eb->first_page = page0;
2206                 i = 1;
2207                 index++;
2208                 page_cache_get(page0);
2209                 mark_page_accessed(page0);
2210                 set_page_extent_mapped(page0);
2211                 set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2212                                  len << 2);
2213         } else {
2214                 i = 0;
2215         }
2216         for (; i < num_pages; i++, index++) {
2217                 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2218                 if (!p) {
2219                         WARN_ON(1);
2220                         goto fail;
2221                 }
2222                 set_page_extent_mapped(p);
2223                 mark_page_accessed(p);
2224                 if (i == 0) {
2225                         eb->first_page = p;
2226                         set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2227                                          len << 2);
2228                 } else {
2229                         set_page_private(p, EXTENT_PAGE_PRIVATE);
2230                 }
2231                 if (!PageUptodate(p))
2232                         uptodate = 0;
2233                 unlock_page(p);
2234         }
2235         if (uptodate)
2236                 eb->flags |= EXTENT_UPTODATE;
2237         eb->flags |= EXTENT_BUFFER_FILLED;
2238
2239 lru_add:
2240         spin_lock(&tree->lru_lock);
2241         add_lru(tree, eb);
2242         spin_unlock(&tree->lru_lock);
2243         return eb;
2244
2245 fail:
2246         spin_lock(&tree->lru_lock);
2247         list_del_init(&eb->lru);
2248         spin_unlock(&tree->lru_lock);
2249         if (!atomic_dec_and_test(&eb->refs))
2250                 return NULL;
2251         for (index = 0; index < i; index++) {
2252                 page_cache_release(extent_buffer_page(eb, index));
2253         }
2254         __free_extent_buffer(eb);
2255         return NULL;
2256 }
2257 EXPORT_SYMBOL(alloc_extent_buffer);
2258
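/*
 * like alloc_extent_buffer, but only succeeds if every backing page is
 * already present in the page cache (find_lock_page instead of
 * find_or_create_page).
 */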
2259 struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2260                                          u64 start, unsigned long len,
2261                                           gfp_t mask)
2262 {
2263         unsigned long num_pages = num_extent_pages(start, len);
2264         unsigned long i;
2265         unsigned long index = start >> PAGE_CACHE_SHIFT;
2266         struct extent_buffer *eb;
2267         struct page *p;
2268         struct address_space *mapping = tree->mapping;
2269         int uptodate = 1;
2270
2271         eb = __alloc_extent_buffer(tree, start, len, mask);
2272         if (!eb || IS_ERR(eb))
2273                 return NULL;
2274
2275         if (eb->flags & EXTENT_BUFFER_FILLED)
2276                 goto lru_add;
2277
2278         for (i = 0; i < num_pages; i++, index++) {
2279                 p = find_lock_page(mapping, index);
2280                 if (!p) {
2281                         goto fail;
2282                 }
2283                 set_page_extent_mapped(p);
2284                 mark_page_accessed(p);
2285
2286                 if (i == 0) {
2287                         eb->first_page = p;
2288                         set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2289                                          len << 2);
2290                 } else {
2291                         set_page_private(p, EXTENT_PAGE_PRIVATE);
2292                 }
2293
2294                 if (!PageUptodate(p))
2295                         uptodate = 0;
2296                 unlock_page(p);
2297         }
2298         if (uptodate)
2299                 eb->flags |= EXTENT_UPTODATE;
2300         eb->flags |= EXTENT_BUFFER_FILLED;
2301
2302 lru_add:
2303         spin_lock(&tree->lru_lock);
2304         add_lru(tree, eb);
2305         spin_unlock(&tree->lru_lock);
2306         return eb;
2307 fail:
2308         spin_lock(&tree->lru_lock);
2309         list_del_init(&eb->lru);
2310         spin_unlock(&tree->lru_lock);
2311         if (!atomic_dec_and_test(&eb->refs))
2312                 return NULL;
2313         for (index = 0; index < i; index++) {
2314                 page_cache_release(extent_buffer_page(eb, index));
2315         }
2316         __free_extent_buffer(eb);
2317         return NULL;
2318 }
2319 EXPORT_SYMBOL(find_extent_buffer);
2320
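/*
 * drop a reference on an extent buffer.  When the last reference goes
 * away the page references are released and the buffer is freed.
 */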
2321 void free_extent_buffer(struct extent_buffer *eb)
2322 {
2323         unsigned long i;
2324         unsigned long num_pages;
2325
2326         if (!eb)
2327                 return;
2328
2329         if (!atomic_dec_and_test(&eb->refs))
2330                 return;
2331
2332         num_pages = num_extent_pages(eb->start, eb->len);
2333
2334         for (i = 0; i < num_pages; i++) {
2335                 page_cache_release(extent_buffer_page(eb, i));
2336         }
2337         __free_extent_buffer(eb);
2338 }
2339 EXPORT_SYMBOL(free_extent_buffer);
2340
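/*
 * clear the dirty bits for an extent buffer in both the extent tree and
 * on the backing pages.  Pages still covered by other dirty ranges are
 * left dirty.
 */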
2341 int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2342                               struct extent_buffer *eb)
2343 {
2344         int set;
2345         unsigned long i;
2346         unsigned long num_pages;
2347         struct page *page;
2348
2349         u64 start = eb->start;
2350         u64 end = start + eb->len - 1;
2351
2352         set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2353         num_pages = num_extent_pages(eb->start, eb->len);
2354
2355         for (i = 0; i < num_pages; i++) {
2356                 page = extent_buffer_page(eb, i);
2357                 lock_page(page);
2358                 /*
2359                  * if we're on the last page or the first page and the
2360                  * block isn't aligned on a page boundary, do extra checks
2361                  * to make sure we don't clean a page that is partially dirty
2362                  */
2363                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2364                     ((i == num_pages - 1) &&
2365                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2366                         start = (u64)page->index << PAGE_CACHE_SHIFT;
2367                         end  = start + PAGE_CACHE_SIZE - 1;
2368                         if (test_range_bit(tree, start, end,
2369                                            EXTENT_DIRTY, 0)) {
2370                                 unlock_page(page);
2371                                 continue;
2372                         }
2373                 }
2374                 clear_page_dirty_for_io(page);
2375                 unlock_page(page);
2376         }
2377         return 0;
2378 }
2379 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2380
2381 int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2382                                     struct extent_buffer *eb)
2383 {
2384         return wait_on_extent_writeback(tree, eb->start,
2385                                         eb->start + eb->len - 1);
2386 }
2387 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2388
2389 int set_extent_buffer_dirty(struct extent_map_tree *tree,
2390                              struct extent_buffer *eb)
2391 {
2392         unsigned long i;
2393         unsigned long num_pages;
2394
2395         num_pages = num_extent_pages(eb->start, eb->len);
2396         for (i = 0; i < num_pages; i++) {
2397                 struct page *page = extent_buffer_page(eb, i);
2398                 /* writepage may need to do something special for the
2399                  * first page, we have to make sure page->private is
2400                  * properly set.  releasepage may drop page->private
2401                  * on us if the page isn't already dirty.
2402                  */
2403                 if (i == 0) {
2404                         lock_page(page);
2405                         set_page_private(page,
2406                                          EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2407                                          eb->len << 2);
2408                 }
2409                 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2410                 if (i == 0)
2411                         unlock_page(page);
2412         }
2413         return set_extent_dirty(tree, eb->start,
2414                                 eb->start + eb->len - 1, GFP_NOFS);
2415 }
2416 EXPORT_SYMBOL(set_extent_buffer_dirty);
2417
2418 int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2419                                 struct extent_buffer *eb)
2420 {
2421         unsigned long i;
2422         struct page *page;
2423         unsigned long num_pages;
2424
2425         num_pages = num_extent_pages(eb->start, eb->len);
2426
2427         set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2428                             GFP_NOFS);
2429         for (i = 0; i < num_pages; i++) {
2430                 page = extent_buffer_page(eb, i);
2431                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2432                     ((i == num_pages - 1) &&
2433                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2434                         check_page_uptodate(tree, page);
2435                         continue;
2436                 }
2437                 SetPageUptodate(page);
2438         }
2439         return 0;
2440 }
2441 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2442
2443 int extent_buffer_uptodate(struct extent_map_tree *tree,
2444                              struct extent_buffer *eb)
2445 {
2446         if (eb->flags & EXTENT_UPTODATE)
2447                 return 1;
2448         return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2449                            EXTENT_UPTODATE, 1);
2450 }
2451 EXPORT_SYMBOL(extent_buffer_uptodate);
2452
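/*
 * read in any pages of the buffer that are not up to date.  When wait
 * is zero the reads are only started; when wait is nonzero we block
 * until they complete and return -EIO if any page failed.
 */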
2453 int read_extent_buffer_pages(struct extent_map_tree *tree,
2454                              struct extent_buffer *eb,
2455                              u64 start,
2456                              int wait)
2457 {
2458         unsigned long i;
2459         unsigned long start_i;
2460         struct page *page;
2461         int err;
2462         int ret = 0;
2463         unsigned long num_pages;
2464
2465         if (eb->flags & EXTENT_UPTODATE)
2466                 return 0;
2467
2468         if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2469                            EXTENT_UPTODATE, 1)) {
2470                 return 0;
2471         }
2472         if (start) {
2473                 WARN_ON(start < eb->start);
2474                 start_i = (start >> PAGE_CACHE_SHIFT) -
2475                         (eb->start >> PAGE_CACHE_SHIFT);
2476         } else {
2477                 start_i = 0;
2478         }
2479
2480         num_pages = num_extent_pages(eb->start, eb->len);
2481         for (i = start_i; i < num_pages; i++) {
2482                 page = extent_buffer_page(eb, i);
2483                 if (PageUptodate(page)) {
2484                         continue;
2485                 }
2486                 if (!wait) {
2487                         if (TestSetPageLocked(page)) {
2488                                 continue;
2489                         }
2490                 } else {
2491                         lock_page(page);
2492                 }
2493                 if (!PageUptodate(page)) {
2494                         err = page->mapping->a_ops->readpage(NULL, page);
2495                         if (err) {
2496                                 ret = err;
2497                         }
2498                 } else {
2499                         unlock_page(page);
2500                 }
2501         }
2502
2503         if (ret || !wait) {
2504                 return ret;
2505         }
2506
2507         for (i = start_i; i < num_pages; i++) {
2508                 page = extent_buffer_page(eb, i);
2509                 wait_on_page_locked(page);
2510                 if (!PageUptodate(page)) {
2511                         ret = -EIO;
2512                 }
2513         }
2514         if (!ret)
2515                 eb->flags |= EXTENT_UPTODATE;
2516         return ret;
2517 }
2518 EXPORT_SYMBOL(read_extent_buffer_pages);
2519
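/*
 * copy len bytes starting at offset start in the extent buffer into
 * dstv, crossing page boundaries as needed.
 */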
2520 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2521                         unsigned long start,
2522                         unsigned long len)
2523 {
2524         size_t cur;
2525         size_t offset;
2526         struct page *page;
2527         char *kaddr;
2528         char *dst = (char *)dstv;
2529         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2530         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2531         unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2532
2533         WARN_ON(start > eb->len);
2534         WARN_ON(start + len > eb->start + eb->len);
2535
2536         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2537
2538         while(len > 0) {
2539                 page = extent_buffer_page(eb, i);
2540                 if (!PageUptodate(page)) {
2541                         printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2542                         WARN_ON(1);
2543                 }
2544                 WARN_ON(!PageUptodate(page));
2545
2546                 cur = min(len, (PAGE_CACHE_SIZE - offset));
2547                 kaddr = kmap_atomic(page, KM_USER1);
2548                 memcpy(dst, kaddr + offset, cur);
2549                 kunmap_atomic(kaddr, KM_USER1);
2550
2551                 dst += cur;
2552                 len -= cur;
2553                 offset = 0;
2554                 i++;
2555         }
2556 }
2557 EXPORT_SYMBOL(read_extent_buffer);
2558
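/*
 * kmap part of an extent buffer.  The requested range must not cross a
 * page boundary (-EINVAL if it would).  *map points at the requested
 * bytes, *map_len is the number of mapped bytes available from there,
 * and *token is what must be handed to unmap_extent_buffer later.
 */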
2559 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2560                                unsigned long min_len, char **token, char **map,
2561                                unsigned long *map_start,
2562                                unsigned long *map_len, int km)
2563 {
2564         size_t offset = start & (PAGE_CACHE_SIZE - 1);
2565         char *kaddr;
2566         struct page *p;
2567         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2568         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2569         unsigned long end_i = (start_offset + start + min_len - 1) >>
2570                 PAGE_CACHE_SHIFT;
2571
2572         if (i != end_i)
2573                 return -EINVAL;
2574
2575         if (i == 0) {
2576                 offset = start_offset;
2577                 *map_start = 0;
2578         } else {
2579                 offset = 0;
2580                 *map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
2581         }
2582         if (start + min_len > eb->len) {
2583                 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
2584                 WARN_ON(1);
2585         }
2586
2587         p = extent_buffer_page(eb, i);
2588         WARN_ON(!PageUptodate(p));
2589         kaddr = kmap_atomic(p, km);
2590         *token = kaddr;
2591         *map = kaddr + offset;
2592         *map_len = PAGE_CACHE_SIZE - offset;
2593         return 0;
2594 }
2595 EXPORT_SYMBOL(map_private_extent_buffer);
2596
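/*
 * same as map_private_extent_buffer, but if the buffer had a cached
 * mapping it is dropped first and the new mapping is cached in its
 * place.
 */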
2597 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2598                       unsigned long min_len,
2599                       char **token, char **map,
2600                       unsigned long *map_start,
2601                       unsigned long *map_len, int km)
2602 {
2603         int err;
2604         int save = 0;
2605         if (eb->map_token) {
2606                 unmap_extent_buffer(eb, eb->map_token, km);
2607                 eb->map_token = NULL;
2608                 save = 1;
2609         }
2610         err = map_private_extent_buffer(eb, start, min_len, token, map,
2611                                        map_start, map_len, km);
2612         if (!err && save) {
2613                 eb->map_token = *token;
2614                 eb->kaddr = *map;
2615                 eb->map_start = *map_start;
2616                 eb->map_len = *map_len;
2617         }
2618         return err;
2619 }
2620 EXPORT_SYMBOL(map_extent_buffer);
2621
2622 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2623 {
2624         kunmap_atomic(token, km);
2625 }
2626 EXPORT_SYMBOL(unmap_extent_buffer);
2627
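/*
 * memcmp-style compare of len bytes of the extent buffer, starting at
 * offset start, against ptrv.
 */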
2628 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2629                           unsigned long start,
2630                           unsigned long len)
2631 {
2632         size_t cur;
2633         size_t offset;
2634         struct page *page;
2635         char *kaddr;
2636         char *ptr = (char *)ptrv;
2637         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2638         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2639         int ret = 0;
2640
2641         WARN_ON(start > eb->len);
2642         WARN_ON(start + len > eb->start + eb->len);
2643
2644         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2645
2646         while(len > 0) {
2647                 page = extent_buffer_page(eb, i);
2648                 WARN_ON(!PageUptodate(page));
2649
2650                 cur = min(len, (PAGE_CACHE_SIZE - offset));
2651
2652                 kaddr = kmap_atomic(page, KM_USER0);
2653                 ret = memcmp(ptr, kaddr + offset, cur);
2654                 kunmap_atomic(kaddr, KM_USER0);
2655                 if (ret)
2656                         break;
2657
2658                 ptr += cur;
2659                 len -= cur;
2660                 offset = 0;
2661                 i++;
2662         }
2663         return ret;
2664 }
2665 EXPORT_SYMBOL(memcmp_extent_buffer);
2666
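/*
 * copy len bytes from srcv into the extent buffer at offset start.
 */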
2667 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2668                          unsigned long start, unsigned long len)
2669 {
2670         size_t cur;
2671         size_t offset;
2672         struct page *page;
2673         char *kaddr;
2674         char *src = (char *)srcv;
2675         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2676         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2677
2678         WARN_ON(start > eb->len);
2679         WARN_ON(start + len > eb->start + eb->len);
2680
2681         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2682
2683         while(len > 0) {
2684                 page = extent_buffer_page(eb, i);
2685                 WARN_ON(!PageUptodate(page));
2686
2687                 cur = min(len, PAGE_CACHE_SIZE - offset);
2688                 kaddr = kmap_atomic(page, KM_USER1);
2689                 memcpy(kaddr + offset, src, cur);
2690                 kunmap_atomic(kaddr, KM_USER1);
2691
2692                 src += cur;
2693                 len -= cur;
2694                 offset = 0;
2695                 i++;
2696         }
2697 }
2698 EXPORT_SYMBOL(write_extent_buffer);
2699
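/*
 * fill len bytes of the extent buffer, starting at offset start, with
 * the byte c.
 */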
2700 void memset_extent_buffer(struct extent_buffer *eb, char c,
2701                           unsigned long start, unsigned long len)
2702 {
2703         size_t cur;
2704         size_t offset;
2705         struct page *page;
2706         char *kaddr;
2707         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2708         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2709
2710         WARN_ON(start > eb->len);
2711         WARN_ON(start + len > eb->start + eb->len);
2712
2713         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2714
2715         while(len > 0) {
2716                 page = extent_buffer_page(eb, i);
2717                 WARN_ON(!PageUptodate(page));
2718
2719                 cur = min(len, PAGE_CACHE_SIZE - offset);
2720                 kaddr = kmap_atomic(page, KM_USER0);
2721                 memset(kaddr + offset, c, cur);
2722                 kunmap_atomic(kaddr, KM_USER0);
2723
2724                 len -= cur;
2725                 offset = 0;
2726                 i++;
2727         }
2728 }
2729 EXPORT_SYMBOL(memset_extent_buffer);
2730
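/*
 * copy len bytes from offset src_offset in src into dst at dst_offset.
 * The two buffers are expected to be the same length.
 */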
2731 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
2732                         unsigned long dst_offset, unsigned long src_offset,
2733                         unsigned long len)
2734 {
2735         u64 dst_len = dst->len;
2736         size_t cur;
2737         size_t offset;
2738         struct page *page;
2739         char *kaddr;
2740         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2741         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2742
2743         WARN_ON(src->len != dst_len);
2744
2745         offset = (start_offset + dst_offset) &
2746                 ((unsigned long)PAGE_CACHE_SIZE - 1);
2747
2748         while(len > 0) {
2749                 page = extent_buffer_page(dst, i);
2750                 WARN_ON(!PageUptodate(page));
2751
2752                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
2753
2754                 kaddr = kmap_atomic(page, KM_USER0);
2755                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
2756                 kunmap_atomic(kaddr, KM_USER0);
2757
2758                 src_offset += cur;
2759                 len -= cur;
2760                 offset = 0;
2761                 i++;
2762         }
2763 }
2764 EXPORT_SYMBOL(copy_extent_buffer);
2765
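/*
 * helpers for copying bytes between two (possibly identical) pages.
 * move_pages copies backwards (or uses memmove within a single page)
 * so it is safe for the overlapping moves done by memmove_extent_buffer;
 * copy_pages does a plain memcpy.
 */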
2766 static void move_pages(struct page *dst_page, struct page *src_page,
2767                        unsigned long dst_off, unsigned long src_off,
2768                        unsigned long len)
2769 {
2770         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2771         if (dst_page == src_page) {
2772                 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
2773         } else {
2774                 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
2775                 char *p = dst_kaddr + dst_off + len;
2776                 char *s = src_kaddr + src_off + len;
2777
2778                 while (len--)
2779                         *--p = *--s;
2780
2781                 kunmap_atomic(src_kaddr, KM_USER1);
2782         }
2783         kunmap_atomic(dst_kaddr, KM_USER0);
2784 }
2785
2786 static void copy_pages(struct page *dst_page, struct page *src_page,
2787                        unsigned long dst_off, unsigned long src_off,
2788                        unsigned long len)
2789 {
2790         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2791         char *src_kaddr;
2792
2793         if (dst_page != src_page)
2794                 src_kaddr = kmap_atomic(src_page, KM_USER1);
2795         else
2796                 src_kaddr = dst_kaddr;
2797
2798         memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
2799         kunmap_atomic(dst_kaddr, KM_USER0);
2800         if (dst_page != src_page)
2801                 kunmap_atomic(src_kaddr, KM_USER1);
2802 }
2803
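/*
 * copy len bytes within a single extent buffer from src_offset to
 * dst_offset, page by page.
 */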
2804 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2805                            unsigned long src_offset, unsigned long len)
2806 {
2807         size_t cur;
2808         size_t dst_off_in_page;
2809         size_t src_off_in_page;
2810         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2811         unsigned long dst_i;
2812         unsigned long src_i;
2813
2814         if (src_offset + len > dst->len) {
2815                 printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
2816                        src_offset, len, dst->len);
2817                 BUG_ON(1);
2818         }
2819         if (dst_offset + len > dst->len) {
2820                 printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
2821                        dst_offset, len, dst->len);
2822                 BUG_ON(1);
2823         }
2824
2825         while(len > 0) {
2826                 dst_off_in_page = (start_offset + dst_offset) &
2827                         ((unsigned long)PAGE_CACHE_SIZE - 1);
2828                 src_off_in_page = (start_offset + src_offset) &
2829                         ((unsigned long)PAGE_CACHE_SIZE - 1);
2830
2831                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2832                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
2833
2834                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
2835                                                src_off_in_page));
2836                 cur = min_t(unsigned long, cur,
2837                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
2838
2839                 copy_pages(extent_buffer_page(dst, dst_i),
2840                            extent_buffer_page(dst, src_i),
2841                            dst_off_in_page, src_off_in_page, cur);
2842
2843                 src_offset += cur;
2844                 dst_offset += cur;
2845                 len -= cur;
2846         }
2847 }
2848 EXPORT_SYMBOL(memcpy_extent_buffer);
2849
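/*
 * memmove within a single extent buffer.  When the destination is above
 * the source the copy is done backwards, page by page, so overlapping
 * ranges are handled correctly.
 */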
2850 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2851                            unsigned long src_offset, unsigned long len)
2852 {
2853         size_t cur;
2854         size_t dst_off_in_page;
2855         size_t src_off_in_page;
2856         unsigned long dst_end = dst_offset + len - 1;
2857         unsigned long src_end = src_offset + len - 1;
2858         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2859         unsigned long dst_i;
2860         unsigned long src_i;
2861
2862         if (src_offset + len > dst->len) {
2863                 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
2864                        src_offset, len, dst->len);
2865                 BUG_ON(1);
2866         }
2867         if (dst_offset + len > dst->len) {
2868                 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
2869                        dst_offset, len, dst->len);
2870                 BUG_ON(1);
2871         }
2872         if (dst_offset < src_offset) {
2873                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
2874                 return;
2875         }
2876         while(len > 0) {
2877                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
2878                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
2879
2880                 dst_off_in_page = (start_offset + dst_end) &
2881                         ((unsigned long)PAGE_CACHE_SIZE - 1);
2882                 src_off_in_page = (start_offset + src_end) &
2883                         ((unsigned long)PAGE_CACHE_SIZE - 1);
2884
2885                 cur = min_t(unsigned long, len, src_off_in_page + 1);
2886                 cur = min(cur, dst_off_in_page + 1);
2887                 move_pages(extent_buffer_page(dst, dst_i),
2888                            extent_buffer_page(dst, src_i),
2889                            dst_off_in_page - cur + 1,
2890                            src_off_in_page - cur + 1, cur);
2891
2892                 dst_end -= cur;
2893                 src_end -= cur;
2894                 len -= cur;
2895         }
2896 }
2897 EXPORT_SYMBOL(memmove_extent_buffer);