1 #include <linux/bitops.h>
2 #include <linux/slab.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_map.h"
17 /* temporary define until extent_map moves out of btrfs */
18 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
19 unsigned long extra_flags,
20 void (*ctor)(void *, struct kmem_cache *,
23 static struct kmem_cache *extent_map_cache;
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
27 static LIST_HEAD(buffers);
28 static LIST_HEAD(states);
30 static DEFINE_SPINLOCK(state_lock);
31 #define BUFFER_LRU_MAX 64
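/*
 * Each extent_map_tree keeps a small LRU of recently used extent_buffers
 * (see add_lru()/find_lru() below).  Up to BUFFER_LRU_MAX buffers stay on
 * the list with an extra reference held, so back-to-back reads of the same
 * tree blocks don't have to rebuild the buffer from scratch.
 * extent_map_tree_empty_lru() drops those references again.
 */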
37 struct rb_node rb_node;
40 struct extent_page_data {
42 struct extent_map_tree *tree;
43 get_extent_t *get_extent;
45 int __init extent_map_init(void)
47 extent_map_cache = btrfs_cache_create("extent_map",
48 sizeof(struct extent_map), 0,
50 if (!extent_map_cache)
52 extent_state_cache = btrfs_cache_create("extent_state",
53 sizeof(struct extent_state), 0,
55 if (!extent_state_cache)
57 extent_buffer_cache = btrfs_cache_create("extent_buffers",
58 sizeof(struct extent_buffer), 0,
60 if (!extent_buffer_cache)
61 goto free_state_cache;
65 kmem_cache_destroy(extent_state_cache);
67 kmem_cache_destroy(extent_map_cache);
71 void __exit extent_map_exit(void)
73 struct extent_state *state;
75 while (!list_empty(&states)) {
76 state = list_entry(states.next, struct extent_state, list);
77 printk(KERN_ERR "state leak: start %llu end %llu state %lu in tree %d refs %d\n",
       (unsigned long long)state->start, (unsigned long long)state->end,
       state->state, state->in_tree, atomic_read(&state->refs));
78 list_del(&state->list);
79 kmem_cache_free(extent_state_cache, state);
84 kmem_cache_destroy(extent_map_cache);
85 if (extent_state_cache)
86 kmem_cache_destroy(extent_state_cache);
87 if (extent_buffer_cache)
88 kmem_cache_destroy(extent_buffer_cache);
91 void extent_map_tree_init(struct extent_map_tree *tree,
92 struct address_space *mapping, gfp_t mask)
94 tree->map.rb_node = NULL;
95 tree->state.rb_node = NULL;
97 rwlock_init(&tree->lock);
98 spin_lock_init(&tree->lru_lock);
99 tree->mapping = mapping;
100 INIT_LIST_HEAD(&tree->buffer_lru);
103 EXPORT_SYMBOL(extent_map_tree_init);
105 void extent_map_tree_empty_lru(struct extent_map_tree *tree)
107 struct extent_buffer *eb;
108 while (!list_empty(&tree->buffer_lru)) {
109 eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
111 list_del_init(&eb->lru);
112 free_extent_buffer(eb);
115 EXPORT_SYMBOL(extent_map_tree_empty_lru);
117 struct extent_map *alloc_extent_map(gfp_t mask)
119 struct extent_map *em;
120 em = kmem_cache_alloc(extent_map_cache, mask);
121 if (!em || IS_ERR(em))
124 atomic_set(&em->refs, 1);
127 EXPORT_SYMBOL(alloc_extent_map);
129 void free_extent_map(struct extent_map *em)
133 if (atomic_dec_and_test(&em->refs)) {
134 WARN_ON(em->in_tree);
135 kmem_cache_free(extent_map_cache, em);
138 EXPORT_SYMBOL(free_extent_map);
141 struct extent_state *alloc_extent_state(gfp_t mask)
143 struct extent_state *state;
146 state = kmem_cache_alloc(extent_state_cache, mask);
147 if (!state || IS_ERR(state))
153 spin_lock_irqsave(&state_lock, flags);
154 list_add(&state->list, &states);
155 spin_unlock_irqrestore(&state_lock, flags);
157 atomic_set(&state->refs, 1);
158 init_waitqueue_head(&state->wq);
161 EXPORT_SYMBOL(alloc_extent_state);
163 void free_extent_state(struct extent_state *state)
168 if (atomic_dec_and_test(&state->refs)) {
169 WARN_ON(state->in_tree);
170 spin_lock_irqsave(&state_lock, flags);
171 list_del(&state->list);
172 spin_unlock_irqrestore(&state_lock, flags);
173 kmem_cache_free(extent_state_cache, state);
176 EXPORT_SYMBOL(free_extent_state);
178 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
179 struct rb_node *node)
181 struct rb_node ** p = &root->rb_node;
182 struct rb_node * parent = NULL;
183 struct tree_entry *entry;
187 entry = rb_entry(parent, struct tree_entry, rb_node);
189 if (offset < entry->start)
191 else if (offset > entry->end)
197 entry = rb_entry(node, struct tree_entry, rb_node);
199 rb_link_node(node, parent, p);
200 rb_insert_color(node, root);
204 static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
205 struct rb_node **prev_ret)
207 struct rb_node * n = root->rb_node;
208 struct rb_node *prev = NULL;
209 struct tree_entry *entry;
210 struct tree_entry *prev_entry = NULL;
213 entry = rb_entry(n, struct tree_entry, rb_node);
217 if (offset < entry->start)
219 else if (offset > entry->end)
226 while (prev && offset > prev_entry->end) {
227 prev = rb_next(prev);
228 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
234 static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
236 struct rb_node *prev;
238 ret = __tree_search(root, offset, &prev);
244 static int tree_delete(struct rb_root *root, u64 offset)
246 struct rb_node *node;
247 struct tree_entry *entry;
249 node = __tree_search(root, offset, NULL);
252 entry = rb_entry(node, struct tree_entry, rb_node);
254 rb_erase(node, root);
259 * add_extent_mapping tries a simple backward merge with existing
260 * mappings. The extent_map struct passed in will be inserted into
261 * the tree directly (no copies made, just a reference taken).
263 int add_extent_mapping(struct extent_map_tree *tree,
264 struct extent_map *em)
267 struct extent_map *prev = NULL;
270 write_lock_irq(&tree->lock);
271 rb = tree_insert(&tree->map, em->end, &em->rb_node);
273 prev = rb_entry(rb, struct extent_map, rb_node);
274 printk(KERN_ERR "found extent map %llu %llu on insert of %llu %llu\n",
       (unsigned long long)prev->start, (unsigned long long)prev->end,
       (unsigned long long)em->start, (unsigned long long)em->end);
278 atomic_inc(&em->refs);
279 if (em->start != 0) {
280 rb = rb_prev(&em->rb_node);
282 prev = rb_entry(rb, struct extent_map, rb_node);
283 if (prev && prev->end + 1 == em->start &&
284 ((em->block_start == EXTENT_MAP_HOLE &&
285 prev->block_start == EXTENT_MAP_HOLE) ||
286 (em->block_start == EXTENT_MAP_INLINE &&
287 prev->block_start == EXTENT_MAP_INLINE) ||
288 (em->block_start == EXTENT_MAP_DELALLOC &&
289 prev->block_start == EXTENT_MAP_DELALLOC) ||
290 (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
291 em->block_start == prev->block_end + 1))) {
292 em->start = prev->start;
293 em->block_start = prev->block_start;
294 rb_erase(&prev->rb_node, &tree->map);
296 free_extent_map(prev);
300 write_unlock_irq(&tree->lock);
303 EXPORT_SYMBOL(add_extent_mapping);
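/*
 * Rough usage sketch (illustrative only, error handling trimmed).  The tree
 * takes its own reference on a successful insert, so the caller eventually
 * drops the one it got from alloc_extent_map().  -EEXIST means an
 * overlapping mapping is already in the tree; callers typically free their
 * copy and look the existing one up instead.
 *
 *	em = alloc_extent_map(GFP_NOFS);
 *	em->start = start;
 *	em->end = end;
 *	em->block_start = block_start;
 *	em->block_end = block_end;
 *	em->bdev = bdev;
 *	ret = add_extent_mapping(tree, em);
 *	...
 *	free_extent_map(em);
 */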
306 * lookup_extent_mapping returns the first extent_map struct in the
307 * tree that intersects the [start, end] (inclusive) range. There may
308 * be additional objects in the tree that intersect, so check the object
309 * returned carefully to make sure you don't need additional lookups.
311 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
314 struct extent_map *em;
315 struct rb_node *rb_node;
317 read_lock_irq(&tree->lock);
318 rb_node = tree_search(&tree->map, start);
323 if (IS_ERR(rb_node)) {
324 em = ERR_PTR(PTR_ERR(rb_node));
327 em = rb_entry(rb_node, struct extent_map, rb_node);
328 if (em->end < start || em->start > end) {
332 atomic_inc(&em->refs);
334 read_unlock_irq(&tree->lock);
337 EXPORT_SYMBOL(lookup_extent_mapping);
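/*
 * Note for callers: the returned extent_map has an extra reference taken
 * under the tree lock, so every successful lookup needs a matching
 * free_extent_map() once the caller is done with it (see
 * try_release_extent_mapping() below for the pattern).
 */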
340 * removes an extent_map struct from the tree. No reference counts are
341 * dropped, and no checks are done to see if the range is in use
343 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
347 write_lock_irq(&tree->lock);
348 ret = tree_delete(&tree->map, em->end);
349 write_unlock_irq(&tree->lock);
352 EXPORT_SYMBOL(remove_extent_mapping);
355 * utility function to look for merge candidates inside a given range.
356 * Any extents with matching state are merged together into a single
357 * extent in the tree. Extents with EXTENT_IO in their state field
358 * are not merged because the end_io handlers need to be able to do
359 * operations on them without sleeping (or doing allocations/splits).
361 * This should be called with the tree lock held.
363 static int merge_state(struct extent_map_tree *tree,
364 struct extent_state *state)
366 struct extent_state *other;
367 struct rb_node *other_node;
369 if (state->state & EXTENT_IOBITS)
372 other_node = rb_prev(&state->rb_node);
374 other = rb_entry(other_node, struct extent_state, rb_node);
375 if (other->end == state->start - 1 &&
376 other->state == state->state) {
377 state->start = other->start;
379 rb_erase(&other->rb_node, &tree->state);
380 free_extent_state(other);
383 other_node = rb_next(&state->rb_node);
385 other = rb_entry(other_node, struct extent_state, rb_node);
386 if (other->start == state->end + 1 &&
387 other->state == state->state) {
388 other->start = state->start;
390 rb_erase(&state->rb_node, &tree->state);
391 free_extent_state(state);
398 * insert an extent_state struct into the tree. 'bits' are set on the
399 * struct before it is inserted.
401 * This may return -EEXIST if the extent is already there, in which case the
402 * state struct is freed.
404 * The tree lock is not taken internally. This is a utility function and
405 * probably isn't what you want to call (see set/clear_extent_bit).
407 static int insert_state(struct extent_map_tree *tree,
408 struct extent_state *state, u64 start, u64 end,
411 struct rb_node *node;
414 printk(KERN_ERR "end < start %llu %llu\n",
       (unsigned long long)end, (unsigned long long)start);
417 state->state |= bits;
418 state->start = start;
420 node = tree_insert(&tree->state, end, &state->rb_node);
422 struct extent_state *found;
423 found = rb_entry(node, struct extent_state, rb_node);
424 printk(KERN_ERR "found node %llu %llu on insert of %llu %llu\n",
       (unsigned long long)found->start, (unsigned long long)found->end,
       (unsigned long long)start, (unsigned long long)end);
425 free_extent_state(state);
428 merge_state(tree, state);
433 * split a given extent state struct in two, inserting the preallocated
434 * struct 'prealloc' as the newly created second half. 'split' indicates an
435 * offset inside 'orig' where it should be split.
438 * Before calling, the tree has 'orig' at [orig->start, orig->end].
 * After calling, there
439 * are two extent state structs in the tree:
440 * prealloc: [orig->start, split - 1]
441 * orig: [ split, orig->end ]
443 * The tree locks are not taken by this function. They need to be held by the caller.
446 static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
447 struct extent_state *prealloc, u64 split)
449 struct rb_node *node;
450 prealloc->start = orig->start;
451 prealloc->end = split - 1;
452 prealloc->state = orig->state;
455 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
457 struct extent_state *found;
458 found = rb_entry(node, struct extent_state, rb_node);
459 printk(KERN_ERR "found node %llu %llu on insert of %llu %llu\n",
       (unsigned long long)found->start, (unsigned long long)found->end,
       (unsigned long long)prealloc->start, (unsigned long long)prealloc->end);
460 free_extent_state(prealloc);
467 * utility function to clear some bits in an extent state struct.
468 * it will optionally wake up any one waiting on this state (wake == 1), or
469 * forcibly remove the state from the tree (delete == 1).
471 * If no bits are set on the state struct after clearing things, the
472 * struct is freed and removed from the tree
474 static int clear_state_bit(struct extent_map_tree *tree,
475 struct extent_state *state, int bits, int wake,
478 int ret = state->state & bits;
479 state->state &= ~bits;
482 if (delete || state->state == 0) {
483 if (state->in_tree) {
484 rb_erase(&state->rb_node, &tree->state);
486 free_extent_state(state);
491 merge_state(tree, state);
497 * clear some bits on a range in the tree. This may require splitting
498 * or inserting elements in the tree, so the gfp mask is used to
499 * indicate which allocations or sleeping are allowed.
501 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
502 * the given range from the tree regardless of state (ie for truncate).
504 * the range [start, end] is inclusive.
506 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
507 * bits were already set, or zero if none of the bits were already set.
509 int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
510 int bits, int wake, int delete, gfp_t mask)
512 struct extent_state *state;
513 struct extent_state *prealloc = NULL;
514 struct rb_node *node;
520 if (!prealloc && (mask & __GFP_WAIT)) {
521 prealloc = alloc_extent_state(mask);
526 write_lock_irqsave(&tree->lock, flags);
528 * this search will find the extents that end after
531 node = tree_search(&tree->state, start);
534 state = rb_entry(node, struct extent_state, rb_node);
535 if (state->start > end)
537 WARN_ON(state->end < start);
540 * | ---- desired range ---- |
542 * | ------------- state -------------- |
544 * We need to split the extent we found, and may flip
545 * bits on second half.
547 * If the extent we found extends past our range, we
548 * just split and search again. It'll get split again
549 * the next time though.
551 * If the extent we found is inside our range, we clear
552 * the desired bit on it.
555 if (state->start < start) {
556 err = split_state(tree, state, prealloc, start);
557 BUG_ON(err == -EEXIST);
561 if (state->end <= end) {
562 start = state->end + 1;
563 set |= clear_state_bit(tree, state, bits,
566 start = state->start;
571 * | ---- desired range ---- |
573 * We need to split the extent, and clear the bit
576 if (state->start <= end && state->end > end) {
577 err = split_state(tree, state, prealloc, end + 1);
578 BUG_ON(err == -EEXIST);
582 set |= clear_state_bit(tree, prealloc, bits,
588 start = state->end + 1;
589 set |= clear_state_bit(tree, state, bits, wake, delete);
593 write_unlock_irqrestore(&tree->lock, flags);
595 free_extent_state(prealloc);
602 write_unlock_irqrestore(&tree->lock, flags);
603 if (mask & __GFP_WAIT)
607 EXPORT_SYMBOL(clear_extent_bit);
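/*
 * Illustrative truncate-style call (this matches what
 * extent_invalidatepage() does further down): wake == 1 kicks anybody
 * sleeping on the range, delete == 1 throws the state records away no
 * matter which bits are set.
 *
 *	clear_extent_bit(tree, start, end,
 *			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
 *			 1, 1, GFP_NOFS);
 */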
609 static int wait_on_state(struct extent_map_tree *tree,
610 struct extent_state *state)
613 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
614 read_unlock_irq(&tree->lock);
616 read_lock_irq(&tree->lock);
617 finish_wait(&state->wq, &wait);
622 * waits for one or more bits to clear on a range in the state tree.
623 * The range [start, end] is inclusive.
624 * The tree lock is taken by this function
626 int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
628 struct extent_state *state;
629 struct rb_node *node;
631 read_lock_irq(&tree->lock);
635 * this search will find all the extents that end after
638 node = tree_search(&tree->state, start);
642 state = rb_entry(node, struct extent_state, rb_node);
644 if (state->start > end)
647 if (state->state & bits) {
648 start = state->start;
649 atomic_inc(&state->refs);
650 wait_on_state(tree, state);
651 free_extent_state(state);
654 start = state->end + 1;
659 if (need_resched()) {
660 read_unlock_irq(&tree->lock);
662 read_lock_irq(&tree->lock);
666 read_unlock_irq(&tree->lock);
669 EXPORT_SYMBOL(wait_extent_bit);
672 * set some bits on a range in the tree. This may require allocations
673 * or sleeping, so the gfp mask is used to indicate what is allowed.
675 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
676 * range already has the desired bits set. The start of the existing
677 * range is returned in failed_start in this case.
679 * [start, end] is inclusive
680 * This takes the tree lock.
682 int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
683 int exclusive, u64 *failed_start, gfp_t mask)
685 struct extent_state *state;
686 struct extent_state *prealloc = NULL;
687 struct rb_node *node;
694 if (!prealloc && (mask & __GFP_WAIT)) {
695 prealloc = alloc_extent_state(mask);
700 write_lock_irqsave(&tree->lock, flags);
702 * this search will find all the extents that end after
705 node = tree_search(&tree->state, start);
707 err = insert_state(tree, prealloc, start, end, bits);
709 BUG_ON(err == -EEXIST);
713 state = rb_entry(node, struct extent_state, rb_node);
714 last_start = state->start;
715 last_end = state->end;
718 * | ---- desired range ---- |
721 * Just lock what we found and keep going
723 if (state->start == start && state->end <= end) {
724 set = state->state & bits;
725 if (set && exclusive) {
726 *failed_start = state->start;
730 state->state |= bits;
731 start = state->end + 1;
732 merge_state(tree, state);
737 * | ---- desired range ---- |
740 * | ------------- state -------------- |
742 * We need to split the extent we found, and may flip bits on
745 * If the extent we found extends past our
746 * range, we just split and search again. It'll get split
747 * again the next time though.
749 * If the extent we found is inside our range, we set the
752 if (state->start < start) {
753 set = state->state & bits;
754 if (exclusive && set) {
755 *failed_start = start;
759 err = split_state(tree, state, prealloc, start);
760 BUG_ON(err == -EEXIST);
764 if (state->end <= end) {
765 state->state |= bits;
766 start = state->end + 1;
767 merge_state(tree, state);
769 start = state->start;
774 * | ---- desired range ---- |
775 * | state | or | state |
777 * There's a hole, we need to insert something in it and
778 * ignore the extent we found.
780 if (state->start > start) {
782 if (end < last_start)
785 this_end = last_start - 1;
786 err = insert_state(tree, prealloc, start, this_end,
789 BUG_ON(err == -EEXIST);
792 start = this_end + 1;
796 * | ---- desired range ---- |
798 * We need to split the extent, and set the bit
801 if (state->start <= end && state->end > end) {
802 set = state->state & bits;
803 if (exclusive && set) {
804 *failed_start = start;
808 err = split_state(tree, state, prealloc, end + 1);
809 BUG_ON(err == -EEXIST);
811 prealloc->state |= bits;
812 merge_state(tree, prealloc);
820 write_unlock_irqrestore(&tree->lock, flags);
822 free_extent_state(prealloc);
829 write_unlock_irqrestore(&tree->lock, flags);
830 if (mask & __GFP_WAIT)
834 EXPORT_SYMBOL(set_extent_bit);
836 /* wrappers around set/clear extent bit */
837 int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
840 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
843 EXPORT_SYMBOL(set_extent_dirty);
845 int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
846 int bits, gfp_t mask)
848 return set_extent_bit(tree, start, end, bits, 0, NULL,
851 EXPORT_SYMBOL(set_extent_bits);
853 int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
854 int bits, gfp_t mask)
856 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
858 EXPORT_SYMBOL(clear_extent_bits);
860 int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
863 return set_extent_bit(tree, start, end,
864 EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
867 EXPORT_SYMBOL(set_extent_delalloc);
869 int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
872 return clear_extent_bit(tree, start, end,
873 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
875 EXPORT_SYMBOL(clear_extent_dirty);
877 int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
880 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
883 EXPORT_SYMBOL(set_extent_new);
885 int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
888 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
890 EXPORT_SYMBOL(clear_extent_new);
892 int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
895 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
898 EXPORT_SYMBOL(set_extent_uptodate);
900 int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
903 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
905 EXPORT_SYMBOL(clear_extent_uptodate);
907 int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
910 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
913 EXPORT_SYMBOL(set_extent_writeback);
915 int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
918 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
920 EXPORT_SYMBOL(clear_extent_writeback);
922 int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
924 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
926 EXPORT_SYMBOL(wait_on_extent_writeback);
929 * locks a range in ascending order, waiting for any locked regions
930 * it hits on the way. [start,end] are inclusive, and this will sleep.
932 int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
937 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
938 &failed_start, mask);
939 if (err == -EEXIST && (mask & __GFP_WAIT)) {
940 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
941 start = failed_start;
945 WARN_ON(start > end);
949 EXPORT_SYMBOL(lock_extent);
951 int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
954 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
956 EXPORT_SYMBOL(unlock_extent);
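/*
 * Typical locking pattern (sketch only): a range is locked in the state
 * tree before IO is issued against it, and unlocked again either directly
 * by the caller or from the end_io handlers once the IO completes.
 * lock_extent() sleeps if part of the range is already locked by someone
 * else.
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... look up mappings, queue reads or writes ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */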
959 * helper function to set pages and extents in the tree dirty
961 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
963 unsigned long index = start >> PAGE_CACHE_SHIFT;
964 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
967 while (index <= end_index) {
968 page = find_get_page(tree->mapping, index);
970 __set_page_dirty_nobuffers(page);
971 page_cache_release(page);
974 set_extent_dirty(tree, start, end, GFP_NOFS);
977 EXPORT_SYMBOL(set_range_dirty);
980 * helper function to set both pages and extents in the tree writeback
982 int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
984 unsigned long index = start >> PAGE_CACHE_SHIFT;
985 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
988 while (index <= end_index) {
989 page = find_get_page(tree->mapping, index);
991 set_page_writeback(page);
992 page_cache_release(page);
995 set_extent_writeback(tree, start, end, GFP_NOFS);
998 EXPORT_SYMBOL(set_range_writeback);
1000 int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
1001 u64 *start_ret, u64 *end_ret, int bits)
1003 struct rb_node *node;
1004 struct extent_state *state;
1007 read_lock_irq(&tree->lock);
1009 * this search will find all the extents that end after
1012 node = tree_search(&tree->state, start);
1013 if (!node || IS_ERR(node)) {
1018 state = rb_entry(node, struct extent_state, rb_node);
1019 if (state->end >= start && (state->state & bits)) {
1020 *start_ret = state->start;
1021 *end_ret = state->end;
1025 node = rb_next(node);
1030 read_unlock_irq(&tree->lock);
1033 EXPORT_SYMBOL(find_first_extent_bit);
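/*
 * Sketch of walking every range with a given bit set (this assumes the
 * return convention used above: 0 when a matching range was found,
 * non-zero otherwise):
 *
 *	u64 found_start, found_end;
 *	while (!find_first_extent_bit(tree, start, &found_start,
 *				      &found_end, EXTENT_DIRTY)) {
 *		... process [found_start, found_end] ...
 *		start = found_end + 1;
 *	}
 */

/*
 * find_lock_delalloc_range walks the state tree from *start, backs up to
 * the beginning of any delalloc run it lands in, and locks the contiguous
 * EXTENT_DELALLOC extents it finds (waiting on any that are already
 * locked).  The locked range is returned through *start and *end, at most
 * roughly max_bytes at a time, and the return value is the number of
 * delalloc extents locked (0 when there were none).
 */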
1035 u64 find_lock_delalloc_range(struct extent_map_tree *tree,
1036 u64 *start, u64 *end, u64 max_bytes)
1038 struct rb_node *node;
1039 struct extent_state *state;
1040 u64 cur_start = *start;
1042 u64 total_bytes = 0;
1044 write_lock_irq(&tree->lock);
1046 * this search will find all the extents that end after
1050 node = tree_search(&tree->state, cur_start);
1051 if (!node || IS_ERR(node)) {
1056 state = rb_entry(node, struct extent_state, rb_node);
1057 if (found && state->start != cur_start) {
1060 if (!(state->state & EXTENT_DELALLOC)) {
1064 struct extent_state *prev_state;
1065 struct rb_node *prev_node = node;
1067 prev_node = rb_prev(prev_node);
1070 prev_state = rb_entry(prev_node,
1071 struct extent_state,
1073 if (!(prev_state->state & EXTENT_DELALLOC))
1079 if (state->state & EXTENT_LOCKED) {
1081 atomic_inc(&state->refs);
1082 prepare_to_wait(&state->wq, &wait,
1083 TASK_UNINTERRUPTIBLE);
1084 write_unlock_irq(&tree->lock);
1086 write_lock_irq(&tree->lock);
1087 finish_wait(&state->wq, &wait);
1088 free_extent_state(state);
1091 state->state |= EXTENT_LOCKED;
1093 *start = state->start;
1096 cur_start = state->end + 1;
1097 node = rb_next(node);
1100 total_bytes += state->end - state->start + 1;
1101 if (total_bytes >= max_bytes)
1105 write_unlock_irq(&tree->lock);
1109 u64 count_range_bits(struct extent_map_tree *tree,
1110 u64 *start, u64 max_bytes, unsigned long bits)
1112 struct rb_node *node;
1113 struct extent_state *state;
1114 u64 cur_start = *start;
1115 u64 total_bytes = 0;
1118 write_lock_irq(&tree->lock);
1120 * this search will find all the extents that end after
1123 node = tree_search(&tree->state, cur_start);
1124 if (!node || IS_ERR(node)) {
1129 state = rb_entry(node, struct extent_state, rb_node);
1130 if ((state->state & bits)) {
1131 total_bytes += state->end - state->start + 1;
1132 if (total_bytes >= max_bytes)
1135 *start = state->start;
1139 node = rb_next(node);
1144 write_unlock_irq(&tree->lock);
1149 * helper function to lock both pages and extents in the tree.
1150 * pages must be locked first.
1152 int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
1154 unsigned long index = start >> PAGE_CACHE_SHIFT;
1155 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1159 while (index <= end_index) {
1160 page = grab_cache_page(tree->mapping, index);
1166 err = PTR_ERR(page);
1171 lock_extent(tree, start, end, GFP_NOFS);
1176 * we failed above in getting the page at 'index', so we undo here
1177 * up to but not including the page at 'index'
1180 index = start >> PAGE_CACHE_SHIFT;
1181 while (index < end_index) {
1182 page = find_get_page(tree->mapping, index);
1184 page_cache_release(page);
1189 EXPORT_SYMBOL(lock_range);
1192 * helper function to unlock both pages and extents in the tree.
1194 int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
1196 unsigned long index = start >> PAGE_CACHE_SHIFT;
1197 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1200 while (index <= end_index) {
1201 page = find_get_page(tree->mapping, index);
1203 page_cache_release(page);
1206 unlock_extent(tree, start, end, GFP_NOFS);
1209 EXPORT_SYMBOL(unlock_range);
1211 int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
1213 struct rb_node *node;
1214 struct extent_state *state;
1217 write_lock_irq(&tree->lock);
1219 * this search will find all the extents that end after
1222 node = tree_search(&tree->state, start);
1223 if (!node || IS_ERR(node)) {
1227 state = rb_entry(node, struct extent_state, rb_node);
1228 if (state->start != start) {
1232 state->private = private;
1234 write_unlock_irq(&tree->lock);
1238 int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
1240 struct rb_node *node;
1241 struct extent_state *state;
1244 read_lock_irq(&tree->lock);
1246 * this search will find all the extents that end after
1249 node = tree_search(&tree->state, start);
1250 if (!node || IS_ERR(node)) {
1254 state = rb_entry(node, struct extent_state, rb_node);
1255 if (state->start != start) {
1259 *private = state->private;
1261 read_unlock_irq(&tree->lock);
1266 * searches a range in the state tree for a given mask.
1267 * If 'filled' == 1, this returns 1 only if every extent in the tree
1268 * has the bits set. Otherwise, 1 is returned if any bit in the
1269 * range is found set.
1271 int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
1272 int bits, int filled)
1274 struct extent_state *state = NULL;
1275 struct rb_node *node;
1278 read_lock_irq(&tree->lock);
1279 node = tree_search(&tree->state, start);
1280 while (node && start <= end) {
1281 state = rb_entry(node, struct extent_state, rb_node);
1283 if (filled && state->start > start) {
1288 if (state->start > end)
1291 if (state->state & bits) {
1295 } else if (filled) {
1299 start = state->end + 1;
1302 node = rb_next(node);
1304 read_unlock_irq(&tree->lock);
1307 EXPORT_SYMBOL(test_range_bit);
1310 * helper function to set a given page up to date if all the
1311 * extents in the tree for that page are up to date
1313 static int check_page_uptodate(struct extent_map_tree *tree,
1316 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1317 u64 end = start + PAGE_CACHE_SIZE - 1;
1318 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1319 SetPageUptodate(page);
1324 * helper function to unlock a page if all the extents in the tree
1325 * for that page are unlocked
1327 static int check_page_locked(struct extent_map_tree *tree,
1330 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1331 u64 end = start + PAGE_CACHE_SIZE - 1;
1332 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1338 * helper function to end page writeback if all the extents
1339 * in the tree for that page are done with writeback
1341 static int check_page_writeback(struct extent_map_tree *tree,
1344 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1345 u64 end = start + PAGE_CACHE_SIZE - 1;
1346 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1347 end_page_writeback(page);
1351 /* lots and lots of room for performance fixes in the end_bio funcs */
1354 * after a writepage IO is done, we need to:
1355 * clear the uptodate bits on error
1356 * clear the writeback bits in the extent tree for this IO
1357 * end_page_writeback if the page has no more pending IO
1359 * Scheduling is not allowed, so the extent state tree is expected
1360 * to have one and only one object corresponding to this IO.
1362 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1363 static void end_bio_extent_writepage(struct bio *bio, int err)
1365 static int end_bio_extent_writepage(struct bio *bio,
1366 unsigned int bytes_done, int err)
1369 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1370 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1371 struct extent_map_tree *tree = bio->bi_private;
1376 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1382 struct page *page = bvec->bv_page;
1383 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1385 end = start + bvec->bv_len - 1;
1387 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1392 if (--bvec >= bio->bi_io_vec)
1393 prefetchw(&bvec->bv_page->flags);
1396 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1397 ClearPageUptodate(page);
1400 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1403 end_page_writeback(page);
1405 check_page_writeback(tree, page);
1406 if (tree->ops && tree->ops->writepage_end_io_hook)
1407 tree->ops->writepage_end_io_hook(page, start, end);
1408 } while (bvec >= bio->bi_io_vec);
1411 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1417 * after a readpage IO is done, we need to:
1418 * clear the uptodate bits on error
1419 * set the uptodate bits if things worked
1420 * set the page up to date if all extents in the tree are uptodate
1421 * clear the lock bit in the extent tree
1422 * unlock the page if there are no other extents locked for it
1424 * Scheduling is not allowed, so the extent state tree is expected
1425 * to have one and only one object corresponding to this IO.
1427 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1428 static void end_bio_extent_readpage(struct bio *bio, int err)
1430 static int end_bio_extent_readpage(struct bio *bio,
1431 unsigned int bytes_done, int err)
1434 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1435 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1436 struct extent_map_tree *tree = bio->bi_private;
1442 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1448 struct page *page = bvec->bv_page;
1449 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1451 end = start + bvec->bv_len - 1;
1453 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1458 if (--bvec >= bio->bi_io_vec)
1459 prefetchw(&bvec->bv_page->flags);
1461 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1462 ret = tree->ops->readpage_end_io_hook(page, start, end);
1467 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1469 SetPageUptodate(page);
1471 check_page_uptodate(tree, page);
1473 ClearPageUptodate(page);
1477 unlock_extent(tree, start, end, GFP_ATOMIC);
1482 check_page_locked(tree, page);
1483 } while (bvec >= bio->bi_io_vec);
1486 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1492 * IO done from prepare_write is pretty simple, we just unlock
1493 * the structs in the extent tree when done, and set the uptodate bits
1496 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1497 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1499 static int end_bio_extent_preparewrite(struct bio *bio,
1500 unsigned int bytes_done, int err)
1503 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1504 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1505 struct extent_map_tree *tree = bio->bi_private;
1509 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1515 struct page *page = bvec->bv_page;
1516 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1518 end = start + bvec->bv_len - 1;
1520 if (--bvec >= bio->bi_io_vec)
1521 prefetchw(&bvec->bv_page->flags);
1524 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1526 ClearPageUptodate(page);
1530 unlock_extent(tree, start, end, GFP_ATOMIC);
1532 } while (bvec >= bio->bi_io_vec);
1535 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1541 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1546 bio = bio_alloc(gfp_flags, nr_vecs);
1548 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1549 while (!bio && (nr_vecs /= 2))
1550 bio = bio_alloc(gfp_flags, nr_vecs);
1554 bio->bi_bdev = bdev;
1555 bio->bi_sector = first_sector;
1560 static int submit_one_bio(int rw, struct bio *bio)
1564 submit_bio(rw, bio);
1565 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1571 static int submit_extent_page(int rw, struct extent_map_tree *tree,
1572 struct page *page, sector_t sector,
1573 size_t size, unsigned long offset,
1574 struct block_device *bdev,
1575 struct bio **bio_ret,
1576 unsigned long max_pages,
1577 bio_end_io_t end_io_func)
1583 if (bio_ret && *bio_ret) {
1585 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1586 bio_add_page(bio, page, size, offset) < size) {
1587 ret = submit_one_bio(rw, bio);
1593 nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
1594 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1596 printk("failed to allocate bio nr %d\n", nr);
1598 bio_add_page(bio, page, size, offset);
1599 bio->bi_end_io = end_io_func;
1600 bio->bi_private = tree;
1604 ret = submit_one_bio(rw, bio);
1610 void set_page_extent_mapped(struct page *page)
1612 if (!PagePrivate(page)) {
1613 SetPagePrivate(page);
1614 WARN_ON(!page->mapping->a_ops->invalidatepage);
1615 set_page_private(page, EXTENT_PAGE_PRIVATE);
1616 page_cache_get(page);
1621 * basic readpage implementation. Locked extent state structs are inserted
1622 * into the tree and removed again when the IO is done (by the end_io
1625 static int __extent_read_full_page(struct extent_map_tree *tree,
1627 get_extent_t *get_extent,
1630 struct inode *inode = page->mapping->host;
1631 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1632 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1636 u64 last_byte = i_size_read(inode);
1640 struct extent_map *em;
1641 struct block_device *bdev;
1644 size_t page_offset = 0;
1646 size_t blocksize = inode->i_sb->s_blocksize;
1648 set_page_extent_mapped(page);
1651 lock_extent(tree, start, end, GFP_NOFS);
1653 while (cur <= end) {
1654 if (cur >= last_byte) {
1655 iosize = PAGE_CACHE_SIZE - page_offset;
1656 zero_user_page(page, page_offset, iosize, KM_USER0);
1657 set_extent_uptodate(tree, cur, cur + iosize - 1,
1659 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1662 em = get_extent(inode, page, page_offset, cur, end, 0);
1663 if (IS_ERR(em) || !em) {
1665 unlock_extent(tree, cur, end, GFP_NOFS);
1669 extent_offset = cur - em->start;
1670 BUG_ON(em->end < cur);
1673 iosize = min(em->end - cur, end - cur) + 1;
1674 cur_end = min(em->end, end);
1675 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1676 sector = (em->block_start + extent_offset) >> 9;
1678 block_start = em->block_start;
1679 free_extent_map(em);
1682 /* we've found a hole, just zero and go on */
1683 if (block_start == EXTENT_MAP_HOLE) {
1684 zero_user_page(page, page_offset, iosize, KM_USER0);
1685 set_extent_uptodate(tree, cur, cur + iosize - 1,
1687 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1689 page_offset += iosize;
1692 /* the get_extent function already copied into the page */
1693 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1694 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1696 page_offset += iosize;
1701 if (tree->ops && tree->ops->readpage_io_hook) {
1702 ret = tree->ops->readpage_io_hook(page, cur,
1706 unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1708 ret = submit_extent_page(READ, tree, page,
1709 sector, iosize, page_offset,
1711 end_bio_extent_readpage);
1716 page_offset += iosize;
1720 if (!PageError(page))
1721 SetPageUptodate(page);
1727 int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1728 get_extent_t *get_extent)
1730 struct bio *bio = NULL;
1733 ret = __extent_read_full_page(tree, page, get_extent, &bio);
1735 submit_one_bio(READ, bio);
1738 EXPORT_SYMBOL(extent_read_full_page);
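/*
 * A minimal sketch of how a filesystem wires this into its
 * address_space_operations ->readpage hook (the names here are made up
 * for illustration):
 *
 *	static int my_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_map_tree *tree = ...;	the per-inode tree
 *		return extent_read_full_page(tree, page, my_get_extent);
 *	}
 *
 * where my_get_extent() is the filesystem's get_extent_t callback that
 * maps a file range to an extent_map (or to EXTENT_MAP_HOLE / INLINE).
 */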
1741 * the writepage semantics are similar to regular writepage. extent
1742 * records are inserted to lock ranges in the tree, and as dirty areas
1743 * are found, they are marked writeback. Then the lock bits are removed
1744 * and the end_io handler clears the writeback ranges
1746 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1749 struct inode *inode = page->mapping->host;
1750 struct extent_page_data *epd = data;
1751 struct extent_map_tree *tree = epd->tree;
1752 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1754 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1758 u64 last_byte = i_size_read(inode);
1762 struct extent_map *em;
1763 struct block_device *bdev;
1766 size_t page_offset = 0;
1768 loff_t i_size = i_size_read(inode);
1769 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1773 WARN_ON(!PageLocked(page));
1774 if (page->index > end_index) {
1775 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1780 if (page->index == end_index) {
1781 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1782 zero_user_page(page, offset,
1783 PAGE_CACHE_SIZE - offset, KM_USER0);
1786 set_page_extent_mapped(page);
1788 delalloc_start = start;
1790 while (delalloc_end < page_end) {
1791 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1794 if (nr_delalloc <= 0)
1796 tree->ops->fill_delalloc(inode, delalloc_start,
1798 clear_extent_bit(tree, delalloc_start,
1800 EXTENT_LOCKED | EXTENT_DELALLOC,
1802 delalloc_start = delalloc_end + 1;
1804 lock_extent(tree, start, page_end, GFP_NOFS);
1807 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1808 printk("found delalloc bits after lock_extent\n");
1811 if (last_byte <= start) {
1812 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1816 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1817 blocksize = inode->i_sb->s_blocksize;
1819 while (cur <= end) {
1820 if (cur >= last_byte) {
1821 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1824 em = epd->get_extent(inode, page, page_offset, cur, end, 1);
1825 if (IS_ERR(em) || !em) {
1830 extent_offset = cur - em->start;
1831 BUG_ON(em->end < cur);
1833 iosize = min(em->end - cur, end - cur) + 1;
1834 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1835 sector = (em->block_start + extent_offset) >> 9;
1837 block_start = em->block_start;
1838 free_extent_map(em);
1841 if (block_start == EXTENT_MAP_HOLE ||
1842 block_start == EXTENT_MAP_INLINE) {
1843 clear_extent_dirty(tree, cur,
1844 cur + iosize - 1, GFP_NOFS);
1846 page_offset += iosize;
1850 /* leave this out until we have a page_mkwrite call */
1851 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1854 page_offset += iosize;
1857 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1858 if (tree->ops && tree->ops->writepage_io_hook) {
1859 ret = tree->ops->writepage_io_hook(page, cur,
1867 unsigned long max_nr = end_index + 1;
1868 set_range_writeback(tree, cur, cur + iosize - 1);
1869 if (!PageWriteback(page)) {
1870 printk("warning page %lu not writeback, "
1871 "cur %llu end %llu\n", page->index,
1872 (unsigned long long)cur,
1873 (unsigned long long)end);
1876 ret = submit_extent_page(WRITE, tree, page, sector,
1877 iosize, page_offset, bdev,
1879 end_bio_extent_writepage);
1884 page_offset += iosize;
1889 /* make sure the mapping tag for page dirty gets cleared */
1890 set_page_writeback(page);
1891 end_page_writeback(page);
1893 unlock_extent(tree, start, page_end, GFP_NOFS);
1898 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
1899 get_extent_t *get_extent,
1900 struct writeback_control *wbc)
1903 struct address_space *mapping = page->mapping;
1904 struct extent_page_data epd = {
1907 .get_extent = get_extent,
1909 struct writeback_control wbc_writepages = {
1911 .sync_mode = WB_SYNC_NONE,
1912 .older_than_this = NULL,
1914 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
1915 .range_end = (loff_t)-1,
1919 ret = __extent_writepage(page, wbc, &epd);
1921 write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
1923 submit_one_bio(WRITE, epd.bio);
1926 EXPORT_SYMBOL(extent_write_full_page);
1928 int extent_writepages(struct extent_map_tree *tree,
1929 struct address_space *mapping,
1930 get_extent_t *get_extent,
1931 struct writeback_control *wbc)
1934 struct extent_page_data epd = {
1937 .get_extent = get_extent,
1940 ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
1942 submit_one_bio(WRITE, epd.bio);
1945 EXPORT_SYMBOL(extent_writepages);
1947 int extent_readpages(struct extent_map_tree *tree,
1948 struct address_space *mapping,
1949 struct list_head *pages, unsigned nr_pages,
1950 get_extent_t get_extent)
1952 struct bio *bio = NULL;
1954 struct pagevec pvec;
1956 pagevec_init(&pvec, 0);
1957 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
1958 struct page *page = list_entry(pages->prev, struct page, lru);
1960 prefetchw(&page->flags);
1961 list_del(&page->lru);
1963 * what we want to do here is call add_to_page_cache_lru,
1964 * but that isn't exported, so we reproduce it here
1966 if (!add_to_page_cache(page, mapping,
1967 page->index, GFP_KERNEL)) {
1969 /* open coding of lru_cache_add, also not exported */
1970 page_cache_get(page);
1971 if (!pagevec_add(&pvec, page))
1972 __pagevec_lru_add(&pvec);
1973 __extent_read_full_page(tree, page, get_extent, &bio);
1975 page_cache_release(page);
1977 if (pagevec_count(&pvec))
1978 __pagevec_lru_add(&pvec);
1979 BUG_ON(!list_empty(pages));
1981 submit_one_bio(READ, bio);
1984 EXPORT_SYMBOL(extent_readpages);
1987 * basic invalidatepage code, this waits on any locked or writeback
1988 * ranges corresponding to the page, and then deletes any extent state
1989 * records from the tree
1991 int extent_invalidatepage(struct extent_map_tree *tree,
1992 struct page *page, unsigned long offset)
1994 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
1995 u64 end = start + PAGE_CACHE_SIZE - 1;
1996 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
1998 start += (offset + blocksize - 1) & ~(blocksize - 1);
2002 lock_extent(tree, start, end, GFP_NOFS);
2003 wait_on_extent_writeback(tree, start, end);
2004 clear_extent_bit(tree, start, end,
2005 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2009 EXPORT_SYMBOL(extent_invalidatepage);
2012 * simple commit_write call, set_range_dirty is used to mark both
2013 * the pages and the extent records as dirty
2015 int extent_commit_write(struct extent_map_tree *tree,
2016 struct inode *inode, struct page *page,
2017 unsigned from, unsigned to)
2019 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2021 set_page_extent_mapped(page);
2022 set_page_dirty(page);
2024 if (pos > inode->i_size) {
2025 i_size_write(inode, pos);
2026 mark_inode_dirty(inode);
2030 EXPORT_SYMBOL(extent_commit_write);
2032 int extent_prepare_write(struct extent_map_tree *tree,
2033 struct inode *inode, struct page *page,
2034 unsigned from, unsigned to, get_extent_t *get_extent)
2036 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2037 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2039 u64 orig_block_start;
2042 struct extent_map *em;
2043 unsigned blocksize = 1 << inode->i_blkbits;
2044 size_t page_offset = 0;
2045 size_t block_off_start;
2046 size_t block_off_end;
2052 set_page_extent_mapped(page);
2054 block_start = (page_start + from) & ~((u64)blocksize - 1);
2055 block_end = (page_start + to - 1) | (blocksize - 1);
2056 orig_block_start = block_start;
2058 lock_extent(tree, page_start, page_end, GFP_NOFS);
2059 while (block_start <= block_end) {
2060 em = get_extent(inode, page, page_offset, block_start,
2062 if (IS_ERR(em) || !em) {
2065 cur_end = min(block_end, em->end);
2066 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2067 block_off_end = block_off_start + blocksize;
2068 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2070 if (!PageUptodate(page) && isnew &&
2071 (block_off_end > to || block_off_start < from)) {
2074 kaddr = kmap_atomic(page, KM_USER0);
2075 if (block_off_end > to)
2076 memset(kaddr + to, 0, block_off_end - to);
2077 if (block_off_start < from)
2078 memset(kaddr + block_off_start, 0,
2079 from - block_off_start);
2080 flush_dcache_page(page);
2081 kunmap_atomic(kaddr, KM_USER0);
2083 if (!isnew && !PageUptodate(page) &&
2084 (block_off_end > to || block_off_start < from) &&
2085 !test_range_bit(tree, block_start, cur_end,
2086 EXTENT_UPTODATE, 1)) {
2088 u64 extent_offset = block_start - em->start;
2090 sector = (em->block_start + extent_offset) >> 9;
2091 iosize = (cur_end - block_start + blocksize - 1) &
2092 ~((u64)blocksize - 1);
2094 * we've already got the extent locked, but we
2095 * need to split the state such that our end_bio
2096 * handler can clear the lock.
2098 set_extent_bit(tree, block_start,
2099 block_start + iosize - 1,
2100 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2101 ret = submit_extent_page(READ, tree, page,
2102 sector, iosize, page_offset, em->bdev,
2104 end_bio_extent_preparewrite);
2106 block_start = block_start + iosize;
2108 set_extent_uptodate(tree, block_start, cur_end,
2110 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2111 block_start = cur_end + 1;
2113 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2114 free_extent_map(em);
2117 wait_extent_bit(tree, orig_block_start,
2118 block_end, EXTENT_LOCKED);
2120 check_page_uptodate(tree, page);
2122 /* FIXME, zero out newly allocated blocks on error */
2125 EXPORT_SYMBOL(extent_prepare_write);
2128 * a helper for releasepage. As long as there are no locked extents
2129 * in the range corresponding to the page, both state records and extent
2130 * map records are removed
2132 int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
2134 struct extent_map *em;
2135 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2136 u64 end = start + PAGE_CACHE_SIZE - 1;
2137 u64 orig_start = start;
2140 while (start <= end) {
2141 em = lookup_extent_mapping(tree, start, end);
2142 if (!em || IS_ERR(em))
2144 if (!test_range_bit(tree, em->start, em->end,
2145 EXTENT_LOCKED, 0)) {
2146 remove_extent_mapping(tree, em);
2147 /* once for the rb tree */
2148 free_extent_map(em);
2150 start = em->end + 1;
2152 free_extent_map(em);
2154 if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
2157 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
2161 EXPORT_SYMBOL(try_release_extent_mapping);
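/*
 * This is meant to back a ->releasepage style hook: a non-zero return
 * means no extents in the page's range are still locked, so the caller
 * may tear down the page's private state.  (Sketch only; how the
 * filesystem clears page->private afterwards is up to the caller.)
 */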
2163 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2164 get_extent_t *get_extent)
2166 struct inode *inode = mapping->host;
2167 u64 start = (u64)iblock << inode->i_blkbits;
2168 u64 end = start + (1 << inode->i_blkbits) - 1;
2169 sector_t sector = 0;
2170 struct extent_map *em;
2172 em = get_extent(inode, NULL, 0, start, end, 0);
2173 if (!em || IS_ERR(em))
2176 if (em->block_start == EXTENT_MAP_INLINE ||
2177 em->block_start == EXTENT_MAP_HOLE)
2180 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2182 free_extent_map(em);
2186 static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
2188 if (list_empty(&eb->lru)) {
2189 extent_buffer_get(eb);
2190 list_add(&eb->lru, &tree->buffer_lru);
2192 if (tree->lru_size >= BUFFER_LRU_MAX) {
2193 struct extent_buffer *rm;
2194 rm = list_entry(tree->buffer_lru.prev,
2195 struct extent_buffer, lru);
2197 list_del_init(&rm->lru);
2198 free_extent_buffer(rm);
2201 list_move(&eb->lru, &tree->buffer_lru);
2204 static struct extent_buffer *find_lru(struct extent_map_tree *tree,
2205 u64 start, unsigned long len)
2207 struct list_head *lru = &tree->buffer_lru;
2208 struct list_head *cur = lru->next;
2209 struct extent_buffer *eb;
2211 if (list_empty(lru))
2215 eb = list_entry(cur, struct extent_buffer, lru);
2216 if (eb->start == start && eb->len == len) {
2217 extent_buffer_get(eb);
2221 } while (cur != lru);
2225 static inline unsigned long num_extent_pages(u64 start, u64 len)
2227 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2228 (start >> PAGE_CACHE_SHIFT);
2231 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2235 struct address_space *mapping;
2238 return eb->first_page;
2239 i += eb->start >> PAGE_CACHE_SHIFT;
2240 mapping = eb->first_page->mapping;
2241 read_lock_irq(&mapping->tree_lock);
2242 p = radix_tree_lookup(&mapping->page_tree, i);
2243 read_unlock_irq(&mapping->tree_lock);
2247 static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2252 struct extent_buffer *eb = NULL;
2254 spin_lock(&tree->lru_lock);
2255 eb = find_lru(tree, start, len);
2256 spin_unlock(&tree->lru_lock);
2261 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2262 INIT_LIST_HEAD(&eb->lru);
2265 atomic_set(&eb->refs, 1);
2270 static void __free_extent_buffer(struct extent_buffer *eb)
2272 kmem_cache_free(extent_buffer_cache, eb);
2275 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2276 u64 start, unsigned long len,
2280 unsigned long num_pages = num_extent_pages(start, len);
2282 unsigned long index = start >> PAGE_CACHE_SHIFT;
2283 struct extent_buffer *eb;
2285 struct address_space *mapping = tree->mapping;
2288 eb = __alloc_extent_buffer(tree, start, len, mask);
2289 if (!eb || IS_ERR(eb))
2292 if (eb->flags & EXTENT_BUFFER_FILLED)
2296 eb->first_page = page0;
2299 page_cache_get(page0);
2300 mark_page_accessed(page0);
2301 set_page_extent_mapped(page0);
2302 WARN_ON(!PageUptodate(page0));
2303 set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2308 for (; i < num_pages; i++, index++) {
2309 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2314 set_page_extent_mapped(p);
2315 mark_page_accessed(p);
2318 set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2321 set_page_private(p, EXTENT_PAGE_PRIVATE);
2323 if (!PageUptodate(p))
2328 eb->flags |= EXTENT_UPTODATE;
2329 eb->flags |= EXTENT_BUFFER_FILLED;
2332 spin_lock(&tree->lru_lock);
2334 spin_unlock(&tree->lru_lock);
2338 spin_lock(&tree->lru_lock);
2339 list_del_init(&eb->lru);
2340 spin_unlock(&tree->lru_lock);
2341 if (!atomic_dec_and_test(&eb->refs))
2343 for (index = 1; index < i; index++) {
2344 page_cache_release(extent_buffer_page(eb, index));
2347 page_cache_release(extent_buffer_page(eb, 0));
2348 __free_extent_buffer(eb);
2351 EXPORT_SYMBOL(alloc_extent_buffer);
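/*
 * Rough extent_buffer lifecycle sketch (illustrative only, error handling
 * trimmed): a buffer is allocated (or found in the per-tree LRU), its
 * backing pages are read, data is copied out, and the reference is
 * dropped again.
 *
 *	eb = alloc_extent_buffer(tree, start, blocksize, NULL, GFP_NOFS);
 *	read_extent_buffer_pages(tree, eb, 0, 1);
 *	read_extent_buffer(eb, buf, 0, blocksize);
 *	free_extent_buffer(eb);
 */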
2353 struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2354 u64 start, unsigned long len,
2357 unsigned long num_pages = num_extent_pages(start, len);
2359 unsigned long index = start >> PAGE_CACHE_SHIFT;
2360 struct extent_buffer *eb;
2362 struct address_space *mapping = tree->mapping;
2365 eb = __alloc_extent_buffer(tree, start, len, mask);
2366 if (!eb || IS_ERR(eb))
2369 if (eb->flags & EXTENT_BUFFER_FILLED)
2372 for (i = 0; i < num_pages; i++, index++) {
2373 p = find_lock_page(mapping, index);
2377 set_page_extent_mapped(p);
2378 mark_page_accessed(p);
2382 set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2385 set_page_private(p, EXTENT_PAGE_PRIVATE);
2388 if (!PageUptodate(p))
2393 eb->flags |= EXTENT_UPTODATE;
2394 eb->flags |= EXTENT_BUFFER_FILLED;
2397 spin_lock(&tree->lru_lock);
2399 spin_unlock(&tree->lru_lock);
2402 spin_lock(&tree->lru_lock);
2403 list_del_init(&eb->lru);
2404 spin_unlock(&tree->lru_lock);
2405 if (!atomic_dec_and_test(&eb->refs))
2407 for (index = 1; index < i; index++) {
2408 page_cache_release(extent_buffer_page(eb, index));
2411 page_cache_release(extent_buffer_page(eb, 0));
2412 __free_extent_buffer(eb);
2415 EXPORT_SYMBOL(find_extent_buffer);
2417 void free_extent_buffer(struct extent_buffer *eb)
2420 unsigned long num_pages;
2425 if (!atomic_dec_and_test(&eb->refs))
2428 WARN_ON(!list_empty(&eb->lru));
2429 num_pages = num_extent_pages(eb->start, eb->len);
2431 for (i = 1; i < num_pages; i++) {
2432 page_cache_release(extent_buffer_page(eb, i));
2434 page_cache_release(extent_buffer_page(eb, 0));
2435 __free_extent_buffer(eb);
2437 EXPORT_SYMBOL(free_extent_buffer);
2439 int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2440 struct extent_buffer *eb)
2444 unsigned long num_pages;
2447 u64 start = eb->start;
2448 u64 end = start + eb->len - 1;
2450 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2451 num_pages = num_extent_pages(eb->start, eb->len);
2453 for (i = 0; i < num_pages; i++) {
2454 page = extent_buffer_page(eb, i);
2457 * if we're on the last page or the first page and the
2458 * block isn't aligned on a page boundary, do extra checks
2459 * to make sure we don't clean a page that is partially dirty
2461 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2462 ((i == num_pages - 1) &&
2463 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2464 start = (u64)page->index << PAGE_CACHE_SHIFT;
2465 end = start + PAGE_CACHE_SIZE - 1;
2466 if (test_range_bit(tree, start, end,
2472 clear_page_dirty_for_io(page);
2473 write_lock_irq(&page->mapping->tree_lock);
2474 if (!PageDirty(page)) {
2475 radix_tree_tag_clear(&page->mapping->page_tree,
2477 PAGECACHE_TAG_DIRTY);
2479 write_unlock_irq(&page->mapping->tree_lock);
2484 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2486 int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2487 struct extent_buffer *eb)
2489 return wait_on_extent_writeback(tree, eb->start,
2490 eb->start + eb->len - 1);
2492 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2494 int set_extent_buffer_dirty(struct extent_map_tree *tree,
2495 struct extent_buffer *eb)
2498 unsigned long num_pages;
2500 num_pages = num_extent_pages(eb->start, eb->len);
2501 for (i = 0; i < num_pages; i++) {
2502 struct page *page = extent_buffer_page(eb, i);
2503 /* writepage may need to do something special for the
2504 * first page, so we have to make sure page->private is
2505 * properly set. releasepage may drop page->private
2506 * on us if the page isn't already dirty.
2510 set_page_private(page,
2511 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2514 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2518 return set_extent_dirty(tree, eb->start,
2519 eb->start + eb->len - 1, GFP_NOFS);
2521 EXPORT_SYMBOL(set_extent_buffer_dirty);
2523 int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2524 struct extent_buffer *eb)
2528 unsigned long num_pages;
2530 num_pages = num_extent_pages(eb->start, eb->len);
2532 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2534 for (i = 0; i < num_pages; i++) {
2535 page = extent_buffer_page(eb, i);
2536 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2537 ((i == num_pages - 1) &&
2538 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2539 check_page_uptodate(tree, page);
2542 SetPageUptodate(page);
2546 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2548 int extent_buffer_uptodate(struct extent_map_tree *tree,
2549 struct extent_buffer *eb)
2551 if (eb->flags & EXTENT_UPTODATE)
2553 return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2554 EXTENT_UPTODATE, 1);
2556 EXPORT_SYMBOL(extent_buffer_uptodate);
2558 int read_extent_buffer_pages(struct extent_map_tree *tree,
2559 struct extent_buffer *eb,
2564 unsigned long start_i;
2568 unsigned long num_pages;
2570 if (eb->flags & EXTENT_UPTODATE)
2573 if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2574 EXTENT_UPTODATE, 1)) {
2579 WARN_ON(start < eb->start);
2580 start_i = (start >> PAGE_CACHE_SHIFT) -
2581 (eb->start >> PAGE_CACHE_SHIFT);
2586 num_pages = num_extent_pages(eb->start, eb->len);
2587 for (i = start_i; i < num_pages; i++) {
2588 page = extent_buffer_page(eb, i);
2589 if (PageUptodate(page)) {
2593 if (TestSetPageLocked(page)) {
2599 if (!PageUptodate(page)) {
2600 err = page->mapping->a_ops->readpage(NULL, page);
2613 for (i = start_i; i < num_pages; i++) {
2614 page = extent_buffer_page(eb, i);
2615 wait_on_page_locked(page);
2616 if (!PageUptodate(page)) {
2621 eb->flags |= EXTENT_UPTODATE;
2624 EXPORT_SYMBOL(read_extent_buffer_pages);
2626 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2627 unsigned long start,
2634 char *dst = (char *)dstv;
2635 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2636 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2637 unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2639 WARN_ON(start > eb->len);
2640 WARN_ON(start + len > eb->len);
2642 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2645 page = extent_buffer_page(eb, i);
2646 if (!PageUptodate(page)) {
2647 printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2650 WARN_ON(!PageUptodate(page));
2652 cur = min(len, (PAGE_CACHE_SIZE - offset));
2653 kaddr = kmap_atomic(page, KM_USER1);
2654 memcpy(dst, kaddr + offset, cur);
2655 kunmap_atomic(kaddr, KM_USER1);
2663 EXPORT_SYMBOL(read_extent_buffer);
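/*
 * Worked example for the offset math above, assuming 4K pages: with
 * eb->start == 5120 (1K into its first page) and start == 7000,
 * start_offset == 5120 & 4095 == 1024, so the requested byte lives in page
 * i == (1024 + 7000) >> 12 == 1 of the buffer, at offset
 * (1024 + 7000) & 4095 == 3928 within that page.  After the first page the
 * copy loop continues page by page with offset reset to 0.
 */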
2665 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2666 unsigned long min_len, char **token, char **map,
2667 unsigned long *map_start,
2668 unsigned long *map_len, int km)
2670 size_t offset = start & (PAGE_CACHE_SIZE - 1);
2673 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2674 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2675 unsigned long end_i = (start_offset + start + min_len - 1) >>
2682 offset = start_offset;
2686 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
2688 if (start + min_len > eb->len) {
2689 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
2693 p = extent_buffer_page(eb, i);
2694 WARN_ON(!PageUptodate(p));
2695 kaddr = kmap_atomic(p, km);
2697 *map = kaddr + offset;
2698 *map_len = PAGE_CACHE_SIZE - offset;
2701 EXPORT_SYMBOL(map_private_extent_buffer);
2703 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2704 unsigned long min_len,
2705 char **token, char **map,
2706 unsigned long *map_start,
2707 unsigned long *map_len, int km)
2711 if (eb->map_token) {
2712 unmap_extent_buffer(eb, eb->map_token, km);
2713 eb->map_token = NULL;
2716 err = map_private_extent_buffer(eb, start, min_len, token, map,
2717 map_start, map_len, km);
2719 eb->map_token = *token;
2721 eb->map_start = *map_start;
2722 eb->map_len = *map_len;
2726 EXPORT_SYMBOL(map_extent_buffer);
2728 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2729 {
2730 kunmap_atomic(token, km);
2731 }
2732 EXPORT_SYMBOL(unmap_extent_buffer);
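/*
 * Illustrative usage sketch for the mapping interface, assuming a
 * hypothetical field at item_offset/item_size that fits inside one page,
 * and that map_private_extent_buffer() returns 0 on success:
 */
#if 0
	char *token;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	u32 val;
	int err;

	err = map_private_extent_buffer(eb, item_offset, item_size, &token,
					&kaddr, &map_start, &map_len, KM_USER0);
	if (!err) {
		memcpy(&val, kaddr + (item_offset - map_start), item_size);
		unmap_extent_buffer(eb, token, KM_USER0);
	}
#endif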
2734 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2735 unsigned long start,
2742 char *ptr = (char *)ptrv;
2743 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2744 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2747 WARN_ON(start > eb->len);
2748 WARN_ON(start + len > eb->len);
2750 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2753 page = extent_buffer_page(eb, i);
2754 WARN_ON(!PageUptodate(page));
2756 cur = min(len, (PAGE_CACHE_SIZE - offset));
2758 kaddr = kmap_atomic(page, KM_USER0);
2759 ret = memcmp(ptr, kaddr + offset, cur);
2760 kunmap_atomic(kaddr, KM_USER0);
2771 EXPORT_SYMBOL(memcmp_extent_buffer);
2773 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2774 unsigned long start, unsigned long len)
2780 char *src = (char *)srcv;
2781 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2782 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2784 WARN_ON(start > eb->len);
2785 WARN_ON(start + len > eb->len);
2787 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2790 page = extent_buffer_page(eb, i);
2791 WARN_ON(!PageUptodate(page));
2793 cur = min(len, PAGE_CACHE_SIZE - offset);
2794 kaddr = kmap_atomic(page, KM_USER1);
2795 memcpy(kaddr + offset, src, cur);
2796 kunmap_atomic(kaddr, KM_USER1);
2804 EXPORT_SYMBOL(write_extent_buffer);
2806 void memset_extent_buffer(struct extent_buffer *eb, char c,
2807 unsigned long start, unsigned long len)
2813 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2814 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2816 WARN_ON(start > eb->len);
2817 WARN_ON(start + len > eb->len);
2819 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2822 page = extent_buffer_page(eb, i);
2823 WARN_ON(!PageUptodate(page));
2825 cur = min(len, PAGE_CACHE_SIZE - offset);
2826 kaddr = kmap_atomic(page, KM_USER0);
2827 memset(kaddr + offset, c, cur);
2828 kunmap_atomic(kaddr, KM_USER0);
2835 EXPORT_SYMBOL(memset_extent_buffer);
2837 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
2838 unsigned long dst_offset, unsigned long src_offset,
2841 u64 dst_len = dst->len;
2846 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2847 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2849 WARN_ON(src->len != dst_len);
2851 offset = (start_offset + dst_offset) &
2852 ((unsigned long)PAGE_CACHE_SIZE - 1);
2855 page = extent_buffer_page(dst, i);
2856 WARN_ON(!PageUptodate(page));
2858 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
2860 kaddr = kmap_atomic(page, KM_USER0);
2861 read_extent_buffer(src, kaddr + offset, src_offset, cur);
2862 kunmap_atomic(kaddr, KM_USER0);
2870 EXPORT_SYMBOL(copy_extent_buffer);
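/*
 * Note on copy_extent_buffer(): only the destination page is kmapped
 * here; the source side goes through read_extent_buffer(), so the two
 * buffers do not need their pages to line up with each other.
 */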
2872 static void move_pages(struct page *dst_page, struct page *src_page,
2873 unsigned long dst_off, unsigned long src_off,
2874 unsigned long len)
2875 {
2876 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2877 if (dst_page == src_page) {
2878 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
2879 } else {
2880 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
2881 char *p = dst_kaddr + dst_off + len;
2882 char *s = src_kaddr + src_off + len;
2884 while (len--)
2885 *--p = *--s;
2887 kunmap_atomic(src_kaddr, KM_USER1);
2888 }
2889 kunmap_atomic(dst_kaddr, KM_USER0);
2890 }
2892 static void copy_pages(struct page *dst_page, struct page *src_page,
2893 unsigned long dst_off, unsigned long src_off,
2894 unsigned long len)
2895 {
2896 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2897 char *src_kaddr;
2899 if (dst_page != src_page)
2900 src_kaddr = kmap_atomic(src_page, KM_USER1);
2901 else
2902 src_kaddr = dst_kaddr;
2904 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
2905 kunmap_atomic(dst_kaddr, KM_USER0);
2906 if (dst_page != src_page)
2907 kunmap_atomic(src_kaddr, KM_USER1);
2908 }
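/*
 * Note on the two helpers above: copy_pages() is for callers whose source
 * and destination ranges cannot overlap, so a plain memcpy() is enough
 * even when both offsets land in the same page.  move_pages() is the
 * overlap-safe variant: same-page moves go through memmove(), and
 * cross-page chunks are copied backwards one byte at a time to match the
 * tail-first order used by memmove_extent_buffer().
 */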
2910 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2911 unsigned long src_offset, unsigned long len)
2914 size_t dst_off_in_page;
2915 size_t src_off_in_page;
2916 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2917 unsigned long dst_i;
2918 unsigned long src_i;
2920 if (src_offset + len > dst->len) {
2921 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
2922 src_offset, len, dst->len);
2925 if (dst_offset + len > dst->len) {
2926 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
2927 dst_offset, len, dst->len);
2932 dst_off_in_page = (start_offset + dst_offset) &
2933 ((unsigned long)PAGE_CACHE_SIZE - 1);
2934 src_off_in_page = (start_offset + src_offset) &
2935 ((unsigned long)PAGE_CACHE_SIZE - 1);
2937 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2938 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
2940 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
2941 src_off_in_page));
2942 cur = min_t(unsigned long, cur,
2943 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
2945 copy_pages(extent_buffer_page(dst, dst_i),
2946 extent_buffer_page(dst, src_i),
2947 dst_off_in_page, src_off_in_page, cur);
2954 EXPORT_SYMBOL(memcpy_extent_buffer);
2956 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2957 unsigned long src_offset, unsigned long len)
2960 size_t dst_off_in_page;
2961 size_t src_off_in_page;
2962 unsigned long dst_end = dst_offset + len - 1;
2963 unsigned long src_end = src_offset + len - 1;
2964 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2965 unsigned long dst_i;
2966 unsigned long src_i;
2968 if (src_offset + len > dst->len) {
2969 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
2970 src_offset, len, dst->len);
2973 if (dst_offset + len > dst->len) {
2974 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
2975 dst_offset, len, dst->len);
2978 if (dst_offset < src_offset) {
2979 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
2983 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
2984 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
2986 dst_off_in_page = (start_offset + dst_end) &
2987 ((unsigned long)PAGE_CACHE_SIZE - 1);
2988 src_off_in_page = (start_offset + src_end) &
2989 ((unsigned long)PAGE_CACHE_SIZE - 1);
2991 cur = min_t(unsigned long, len, src_off_in_page + 1);
2992 cur = min(cur, dst_off_in_page + 1);
2993 move_pages(extent_buffer_page(dst, dst_i),
2994 extent_buffer_page(dst, src_i),
2995 dst_off_in_page - cur + 1,
2996 src_off_in_page - cur + 1, cur);
3003 EXPORT_SYMBOL(memmove_extent_buffer);
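/*
 * Illustrative usage sketch, assuming hypothetical offsets and a
 * caller-tracked used_bytes: opening a gap inside a buffer, where the
 * destination range overlaps the source range and the copy therefore has
 * to run from the end backwards.  A forward (non-overlapping) move is
 * simply delegated to memcpy_extent_buffer() above.
 */
#if 0
	/* shift everything from offset 100 up by 25 bytes */
	memmove_extent_buffer(eb, 125, 100, used_bytes - 100);
#endif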