1 #include <linux/bitops.h>
2 #include <linux/slab.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_map.h"
17 /* temporary define until extent_map moves out of btrfs */
18 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
19 unsigned long extra_flags,
20 void (*ctor)(void *, struct kmem_cache *,
23 static struct kmem_cache *extent_map_cache;
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
27 static LIST_HEAD(buffers);
28 static LIST_HEAD(states);
30 static DEFINE_SPINLOCK(state_lock);
31 #define BUFFER_LRU_MAX 64
37 struct rb_node rb_node;
40 struct extent_page_data {
42 struct extent_map_tree *tree;
43 get_extent_t *get_extent;
46 void __init extent_map_init(void)
48 extent_map_cache = btrfs_cache_create("extent_map",
49 sizeof(struct extent_map), 0,
51 extent_state_cache = btrfs_cache_create("extent_state",
52 sizeof(struct extent_state), 0,
54 extent_buffer_cache = btrfs_cache_create("extent_buffers",
55 sizeof(struct extent_buffer), 0,
59 void __exit extent_map_exit(void)
61 struct extent_state *state;
63 while (!list_empty(&states)) {
64 state = list_entry(states.next, struct extent_state, list);
65 printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
66 list_del(&state->list);
67 kmem_cache_free(extent_state_cache, state);
72 kmem_cache_destroy(extent_map_cache);
73 if (extent_state_cache)
74 kmem_cache_destroy(extent_state_cache);
75 if (extent_buffer_cache)
76 kmem_cache_destroy(extent_buffer_cache);
79 void extent_map_tree_init(struct extent_map_tree *tree,
80 struct address_space *mapping, gfp_t mask)
82 tree->map.rb_node = NULL;
83 tree->state.rb_node = NULL;
85 rwlock_init(&tree->lock);
86 spin_lock_init(&tree->lru_lock);
87 tree->mapping = mapping;
88 INIT_LIST_HEAD(&tree->buffer_lru);
91 EXPORT_SYMBOL(extent_map_tree_init);
93 void extent_map_tree_empty_lru(struct extent_map_tree *tree)
95 struct extent_buffer *eb;
96 while(!list_empty(&tree->buffer_lru)) {
97 eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
100 free_extent_buffer(eb);
103 EXPORT_SYMBOL(extent_map_tree_empty_lru);
105 struct extent_map *alloc_extent_map(gfp_t mask)
107 struct extent_map *em;
108 em = kmem_cache_alloc(extent_map_cache, mask);
109 if (!em || IS_ERR(em))
112 atomic_set(&em->refs, 1);
115 EXPORT_SYMBOL(alloc_extent_map);
117 void free_extent_map(struct extent_map *em)
121 if (atomic_dec_and_test(&em->refs)) {
122 WARN_ON(em->in_tree);
123 kmem_cache_free(extent_map_cache, em);
126 EXPORT_SYMBOL(free_extent_map);
129 struct extent_state *alloc_extent_state(gfp_t mask)
131 struct extent_state *state;
134 state = kmem_cache_alloc(extent_state_cache, mask);
135 if (!state || IS_ERR(state))
141 spin_lock_irqsave(&state_lock, flags);
142 list_add(&state->list, &states);
143 spin_unlock_irqrestore(&state_lock, flags);
145 atomic_set(&state->refs, 1);
146 init_waitqueue_head(&state->wq);
149 EXPORT_SYMBOL(alloc_extent_state);
151 void free_extent_state(struct extent_state *state)
156 if (atomic_dec_and_test(&state->refs)) {
157 WARN_ON(state->in_tree);
158 spin_lock_irqsave(&state_lock, flags);
159 list_del(&state->list);
160 spin_unlock_irqrestore(&state_lock, flags);
161 kmem_cache_free(extent_state_cache, state);
164 EXPORT_SYMBOL(free_extent_state);
166 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
167 struct rb_node *node)
169 struct rb_node ** p = &root->rb_node;
170 struct rb_node * parent = NULL;
171 struct tree_entry *entry;
175 entry = rb_entry(parent, struct tree_entry, rb_node);
177 if (offset < entry->start)
179 else if (offset > entry->end)
185 entry = rb_entry(node, struct tree_entry, rb_node);
187 rb_link_node(node, parent, p);
188 rb_insert_color(node, root);
192 static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
193 struct rb_node **prev_ret)
195 struct rb_node * n = root->rb_node;
196 struct rb_node *prev = NULL;
197 struct tree_entry *entry;
198 struct tree_entry *prev_entry = NULL;
201 entry = rb_entry(n, struct tree_entry, rb_node);
205 if (offset < entry->start)
207 else if (offset > entry->end)
214 while(prev && offset > prev_entry->end) {
215 prev = rb_next(prev);
216 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
222 static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
224 struct rb_node *prev;
226 ret = __tree_search(root, offset, &prev);
232 static int tree_delete(struct rb_root *root, u64 offset)
234 struct rb_node *node;
235 struct tree_entry *entry;
237 node = __tree_search(root, offset, NULL);
240 entry = rb_entry(node, struct tree_entry, rb_node);
242 rb_erase(node, root);
247 * add_extent_mapping tries a simple backward merge with existing
248 * mappings. The extent_map struct passed in will be inserted into
249 * the tree directly (no copies made, just a reference taken).
251 int add_extent_mapping(struct extent_map_tree *tree,
252 struct extent_map *em)
255 struct extent_map *prev = NULL;
258 write_lock_irq(&tree->lock);
259 rb = tree_insert(&tree->map, em->end, &em->rb_node);
261 prev = rb_entry(rb, struct extent_map, rb_node);
262 printk("found extent map %Lu %Lu on insert of %Lu %Lu\n", prev->start, prev->end, em->start, em->end);
266 atomic_inc(&em->refs);
267 if (em->start != 0) {
268 rb = rb_prev(&em->rb_node);
270 prev = rb_entry(rb, struct extent_map, rb_node);
271 if (prev && prev->end + 1 == em->start &&
272 ((em->block_start == EXTENT_MAP_HOLE &&
273 prev->block_start == EXTENT_MAP_HOLE) ||
274 (em->block_start == EXTENT_MAP_INLINE &&
275 prev->block_start == EXTENT_MAP_INLINE) ||
276 (em->block_start == EXTENT_MAP_DELALLOC &&
277 prev->block_start == EXTENT_MAP_DELALLOC) ||
278 (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
279 em->block_start == prev->block_end + 1))) {
280 em->start = prev->start;
281 em->block_start = prev->block_start;
282 rb_erase(&prev->rb_node, &tree->map);
284 free_extent_map(prev);
288 write_unlock_irq(&tree->lock);
291 EXPORT_SYMBOL(add_extent_mapping);
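/*
 * Illustrative usage sketch (not from the original file): a typical caller
 * fills in an inclusive mapping and hands it to the tree.  The field names
 * mirror the ones used above; the struct itself lives in extent_map.h.
 *
 *	struct extent_map *em = alloc_extent_map(GFP_NOFS);
 *	em->start = start;
 *	em->end = start + len - 1;
 *	em->block_start = block_start;
 *	em->block_end = block_start + len - 1;
 *	ret = add_extent_mapping(tree, em);
 *
 * On success the tree takes its own reference, so the caller still drops
 * its allocation reference with free_extent_map() once it is done with em.
 */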
294 * lookup_extent_mapping returns the first extent_map struct in the
295 * tree that intersects the [start, end] (inclusive) range. There may
296 * be additional objects in the tree that intersect, so check the object
297 * returned carefully to make sure you don't need additional lookups.
299 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
302 struct extent_map *em;
303 struct rb_node *rb_node;
305 read_lock_irq(&tree->lock);
306 rb_node = tree_search(&tree->map, start);
311 if (IS_ERR(rb_node)) {
312 em = ERR_PTR(PTR_ERR(rb_node));
315 em = rb_entry(rb_node, struct extent_map, rb_node);
316 if (em->end < start || em->start > end) {
320 atomic_inc(&em->refs);
322 read_unlock_irq(&tree->lock);
325 EXPORT_SYMBOL(lookup_extent_mapping);
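/*
 * Illustrative walk (not from the original file) built on the lookup above;
 * the same pattern appears in try_release_extent_mapping() near the end of
 * this file:
 *
 *	while (start <= end) {
 *		em = lookup_extent_mapping(tree, start, end);
 *		if (!em || IS_ERR(em))
 *			break;
 *		... use em ...
 *		start = em->end + 1;
 *		free_extent_map(em);
 *	}
 *
 * The lookup takes a reference on the map it returns, so every iteration
 * must drop it again with free_extent_map().
 */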
328 * removes an extent_map struct from the tree. No reference counts are
329 * dropped, and no checks are done to see if the range is in use
331 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
335 write_lock_irq(&tree->lock);
336 ret = tree_delete(&tree->map, em->end);
337 write_unlock_irq(&tree->lock);
340 EXPORT_SYMBOL(remove_extent_mapping);
343 * utility function to look for merge candidates inside a given range.
344 * Any extents with matching state are merged together into a single
345 * extent in the tree. Extents with EXTENT_IOBITS set in their state field
346 * are not merged because the end_io handlers need to be able to do
347 * operations on them without sleeping (or doing allocations/splits).
349 * This should be called with the tree lock held.
351 static int merge_state(struct extent_map_tree *tree,
352 struct extent_state *state)
354 struct extent_state *other;
355 struct rb_node *other_node;
357 if (state->state & EXTENT_IOBITS)
360 other_node = rb_prev(&state->rb_node);
362 other = rb_entry(other_node, struct extent_state, rb_node);
363 if (other->end == state->start - 1 &&
364 other->state == state->state) {
365 state->start = other->start;
367 rb_erase(&other->rb_node, &tree->state);
368 free_extent_state(other);
371 other_node = rb_next(&state->rb_node);
373 other = rb_entry(other_node, struct extent_state, rb_node);
374 if (other->start == state->end + 1 &&
375 other->state == state->state) {
376 other->start = state->start;
378 rb_erase(&state->rb_node, &tree->state);
379 free_extent_state(state);
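/*
 * Worked example of the merge above (illustrative, assuming 4K extents):
 * with [0, 4095] and [4096, 8191] both carrying exactly EXTENT_DIRTY, the
 * neighbouring struct is absorbed and a single [0, 8191] EXTENT_DIRTY
 * record remains.  If either record also has any of the EXTENT_IOBITS set
 * (the bits used while IO is in flight), no merge is attempted.
 */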
386 * insert an extent_state struct into the tree. 'bits' are set on the
387 * struct before it is inserted.
389 * This may return -EEXIST if the extent is already there, in which case the
390 * state struct is freed.
392 * The tree lock is not taken internally. This is a utility function and
393 * probably isn't what you want to call (see set/clear_extent_bit).
395 static int insert_state(struct extent_map_tree *tree,
396 struct extent_state *state, u64 start, u64 end,
399 struct rb_node *node;
402 printk("end < start %Lu %Lu\n", end, start);
405 state->state |= bits;
406 state->start = start;
408 node = tree_insert(&tree->state, end, &state->rb_node);
410 struct extent_state *found;
411 found = rb_entry(node, struct extent_state, rb_node);
412 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
413 free_extent_state(state);
416 merge_state(tree, state);
421 * split a given extent state struct in two, inserting the preallocated
422 * struct 'prealloc' as the newly created second half. 'split' indicates an
423 * offset inside 'orig' where it should be split.
426 * Before calling, the tree has 'orig' at [orig->start, orig->end]. After calling, there
427 * are two extent state structs in the tree:
428 * prealloc: [orig->start, split - 1]
429 * orig: [ split, orig->end ]
431 * The tree locks are not taken by this function. They need to be held by the caller.
434 static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
435 struct extent_state *prealloc, u64 split)
437 struct rb_node *node;
438 prealloc->start = orig->start;
439 prealloc->end = split - 1;
440 prealloc->state = orig->state;
443 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
445 struct extent_state *found;
446 found = rb_entry(node, struct extent_state, rb_node);
447 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
448 free_extent_state(prealloc);
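/*
 * Worked example for split_state (illustrative): splitting an 'orig' that
 * covers [0, 8191] at split == 4096 leaves prealloc as [0, 4095] and orig
 * as [4096, 8191], both with the original state bits.  The caller passes in
 * 'prealloc' precisely so that no allocation is needed while the tree lock
 * is held.
 */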
455 * utility function to clear some bits in an extent state struct.
456 * it will optionally wake up anyone waiting on this state (wake == 1), or
457 * forcibly remove the state from the tree (delete == 1).
459 * If no bits are set on the state struct after clearing things, the
460 * struct is freed and removed from the tree
462 static int clear_state_bit(struct extent_map_tree *tree,
463 struct extent_state *state, int bits, int wake,
466 int ret = state->state & bits;
467 state->state &= ~bits;
470 if (delete || state->state == 0) {
471 if (state->in_tree) {
472 rb_erase(&state->rb_node, &tree->state);
474 free_extent_state(state);
479 merge_state(tree, state);
485 * clear some bits on a range in the tree. This may require splitting
486 * or inserting elements in the tree, so the gfp mask is used to
487 * indicate which allocations or sleeping are allowed.
489 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
490 * the given range from the tree regardless of state (ie for truncate).
492 * the range [start, end] is inclusive.
494 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
495 * bits were already set, or zero if none of the bits were already set.
497 int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
498 int bits, int wake, int delete, gfp_t mask)
500 struct extent_state *state;
501 struct extent_state *prealloc = NULL;
502 struct rb_node *node;
508 if (!prealloc && (mask & __GFP_WAIT)) {
509 prealloc = alloc_extent_state(mask);
514 write_lock_irqsave(&tree->lock, flags);
516 * this search will find the extents that end after our range starts.
519 node = tree_search(&tree->state, start);
522 state = rb_entry(node, struct extent_state, rb_node);
523 if (state->start > end)
525 WARN_ON(state->end < start);
528 * | ---- desired range ---- |
530 * | ------------- state -------------- |
532 * We need to split the extent we found, and may flip
533 * bits on second half.
535 * If the extent we found extends past our range, we
536 * just split and search again. It'll get split again
537 * the next time though.
539 * If the extent we found is inside our range, we clear
540 * the desired bit on it.
543 if (state->start < start) {
544 err = split_state(tree, state, prealloc, start);
545 BUG_ON(err == -EEXIST);
549 if (state->end <= end) {
550 start = state->end + 1;
551 set |= clear_state_bit(tree, state, bits,
554 start = state->start;
559 * | ---- desired range ---- |
561 * We need to split the extent, and clear the bit
564 if (state->start <= end && state->end > end) {
565 err = split_state(tree, state, prealloc, end + 1);
566 BUG_ON(err == -EEXIST);
570 set |= clear_state_bit(tree, prealloc, bits,
576 start = state->end + 1;
577 set |= clear_state_bit(tree, state, bits, wake, delete);
581 write_unlock_irqrestore(&tree->lock, flags);
583 free_extent_state(prealloc);
590 write_unlock_irqrestore(&tree->lock, flags);
591 if (mask & __GFP_WAIT)
595 EXPORT_SYMBOL(clear_extent_bit);
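/*
 * Illustrative call (not from the original file): a truncate-style teardown
 * of an inclusive range, waking any sleepers and removing the state records
 * regardless of which bits they carry, much like extent_invalidatepage()
 * further down:
 *
 *	clear_extent_bit(tree, start, end,
 *			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
 *			 1, 1, GFP_NOFS);
 *
 * The gfp mask matters because clearing part of an existing record forces a
 * split, and splitting needs a preallocated extent_state.
 */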
597 static int wait_on_state(struct extent_map_tree *tree,
598 struct extent_state *state)
601 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
602 read_unlock_irq(&tree->lock);
604 read_lock_irq(&tree->lock);
605 finish_wait(&state->wq, &wait);
610 * waits for one or more bits to clear on a range in the state tree.
611 * The range [start, end] is inclusive.
612 * The tree lock is taken by this function
614 int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
616 struct extent_state *state;
617 struct rb_node *node;
619 read_lock_irq(&tree->lock);
623 * this search will find all the extents that end after our range starts.
626 node = tree_search(&tree->state, start);
630 state = rb_entry(node, struct extent_state, rb_node);
632 if (state->start > end)
635 if (state->state & bits) {
636 start = state->start;
637 atomic_inc(&state->refs);
638 wait_on_state(tree, state);
639 free_extent_state(state);
642 start = state->end + 1;
647 if (need_resched()) {
648 read_unlock_irq(&tree->lock);
650 read_lock_irq(&tree->lock);
654 read_unlock_irq(&tree->lock);
657 EXPORT_SYMBOL(wait_extent_bit);
660 * set some bits on a range in the tree. This may require allocations
661 * or sleeping, so the gfp mask is used to indicate what is allowed.
663 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
664 * range already has the desired bits set. The start of the existing
665 * range is returned in failed_start in this case.
667 * [start, end] is inclusive
668 * This takes the tree lock.
670 int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
671 int exclusive, u64 *failed_start, gfp_t mask)
673 struct extent_state *state;
674 struct extent_state *prealloc = NULL;
675 struct rb_node *node;
682 if (!prealloc && (mask & __GFP_WAIT)) {
683 prealloc = alloc_extent_state(mask);
688 write_lock_irqsave(&tree->lock, flags);
690 * this search will find all the extents that end after our range starts.
693 node = tree_search(&tree->state, start);
695 err = insert_state(tree, prealloc, start, end, bits);
697 BUG_ON(err == -EEXIST);
701 state = rb_entry(node, struct extent_state, rb_node);
702 last_start = state->start;
703 last_end = state->end;
706 * | ---- desired range ---- |
709 * Just lock what we found and keep going
711 if (state->start == start && state->end <= end) {
712 set = state->state & bits;
713 if (set && exclusive) {
714 *failed_start = state->start;
718 state->state |= bits;
719 start = state->end + 1;
720 merge_state(tree, state);
725 * | ---- desired range ---- |
728 * | ------------- state -------------- |
730 * We need to split the extent we found, and may flip bits on
733 * If the extent we found extends past our
734 * range, we just split and search again. It'll get split
735 * again the next time though.
737 * If the extent we found is inside our range, we set the
740 if (state->start < start) {
741 set = state->state & bits;
742 if (exclusive && set) {
743 *failed_start = start;
747 err = split_state(tree, state, prealloc, start);
748 BUG_ON(err == -EEXIST);
752 if (state->end <= end) {
753 state->state |= bits;
754 start = state->end + 1;
755 merge_state(tree, state);
757 start = state->start;
762 * | ---- desired range ---- |
763 * | state | or | state |
765 * There's a hole, we need to insert something in it and
766 * ignore the extent we found.
768 if (state->start > start) {
770 if (end < last_start)
773 this_end = last_start - 1;
774 err = insert_state(tree, prealloc, start, this_end,
777 BUG_ON(err == -EEXIST);
780 start = this_end + 1;
784 * | ---- desired range ---- |
786 * We need to split the extent, and set the bit
789 if (state->start <= end && state->end > end) {
790 set = state->state & bits;
791 if (exclusive && set) {
792 *failed_start = start;
796 err = split_state(tree, state, prealloc, end + 1);
797 BUG_ON(err == -EEXIST);
799 prealloc->state |= bits;
800 merge_state(tree, prealloc);
808 write_unlock_irqrestore(&tree->lock, flags);
810 free_extent_state(prealloc);
817 write_unlock_irqrestore(&tree->lock, flags);
818 if (mask & __GFP_WAIT)
822 EXPORT_SYMBOL(set_extent_bit);
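/*
 * Illustrative use of the exclusive mode (a sketch; lock_extent() below is
 * the real caller): with exclusive == 1 a conflicting range makes this fail
 * and report where the conflict begins.
 *
 *	u64 failed_start;
 *	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *			     &failed_start, mask);
 *	if (err == -EEXIST)
 *		wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
 */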
824 /* wrappers around set/clear extent bit */
825 int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
828 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
831 EXPORT_SYMBOL(set_extent_dirty);
833 int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
834 int bits, gfp_t mask)
836 return set_extent_bit(tree, start, end, bits, 0, NULL,
839 EXPORT_SYMBOL(set_extent_bits);
841 int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
842 int bits, gfp_t mask)
844 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
846 EXPORT_SYMBOL(clear_extent_bits);
848 int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
851 return set_extent_bit(tree, start, end,
852 EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
855 EXPORT_SYMBOL(set_extent_delalloc);
857 int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
860 return clear_extent_bit(tree, start, end,
861 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
863 EXPORT_SYMBOL(clear_extent_dirty);
865 int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
868 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
871 EXPORT_SYMBOL(set_extent_new);
873 int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
876 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
878 EXPORT_SYMBOL(clear_extent_new);
880 int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
883 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
886 EXPORT_SYMBOL(set_extent_uptodate);
888 int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
891 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
893 EXPORT_SYMBOL(clear_extent_uptodate);
895 int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
898 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
901 EXPORT_SYMBOL(set_extent_writeback);
903 int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
906 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
908 EXPORT_SYMBOL(clear_extent_writeback);
910 int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
912 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
914 EXPORT_SYMBOL(wait_on_extent_writeback);
917 * locks a range in ascending order, waiting for any locked regions
918 * it hits on the way. [start,end] are inclusive, and this will sleep.
920 int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
925 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
926 &failed_start, mask);
927 if (err == -EEXIST && (mask & __GFP_WAIT)) {
928 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
929 start = failed_start;
933 WARN_ON(start > end);
937 EXPORT_SYMBOL(lock_extent);
939 int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
942 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
944 EXPORT_SYMBOL(unlock_extent);
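/*
 * Illustrative pairing (a sketch, not original code): the read and write
 * paths below lock a whole page-aligned range up front and then unlock it
 * piecewise as each chunk is handled:
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... start or complete IO on [cur, cur + iosize - 1] ...
 *	unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
 *
 * Both ends of every range are inclusive, as everywhere else in this file.
 */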
947 * helper function to set pages and extents in the tree dirty
949 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
951 unsigned long index = start >> PAGE_CACHE_SHIFT;
952 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
955 while (index <= end_index) {
956 page = find_get_page(tree->mapping, index);
958 __set_page_dirty_nobuffers(page);
959 page_cache_release(page);
962 set_extent_dirty(tree, start, end, GFP_NOFS);
965 EXPORT_SYMBOL(set_range_dirty);
968 * helper function to set both pages and extents in the tree writeback
970 int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
972 unsigned long index = start >> PAGE_CACHE_SHIFT;
973 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
976 while (index <= end_index) {
977 page = find_get_page(tree->mapping, index);
979 set_page_writeback(page);
980 page_cache_release(page);
983 set_extent_writeback(tree, start, end, GFP_NOFS);
986 EXPORT_SYMBOL(set_range_writeback);
988 int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
989 u64 *start_ret, u64 *end_ret, int bits)
991 struct rb_node *node;
992 struct extent_state *state;
995 read_lock_irq(&tree->lock);
997 * this search will find all the extents that end after our range starts.
1000 node = tree_search(&tree->state, start);
1001 if (!node || IS_ERR(node)) {
1006 state = rb_entry(node, struct extent_state, rb_node);
1007 if (state->end >= start && (state->state & bits)) {
1008 *start_ret = state->start;
1009 *end_ret = state->end;
1013 node = rb_next(node);
1018 read_unlock_irq(&tree->lock);
1021 EXPORT_SYMBOL(find_first_extent_bit);
1023 u64 find_lock_delalloc_range(struct extent_map_tree *tree,
1024 u64 start, u64 lock_start, u64 *end, u64 max_bytes)
1026 struct rb_node *node;
1027 struct extent_state *state;
1028 u64 cur_start = start;
1030 u64 total_bytes = 0;
1032 write_lock_irq(&tree->lock);
1034 * this search will find all the extents that end after our range starts.
1038 node = tree_search(&tree->state, cur_start);
1039 if (!node || IS_ERR(node)) {
1044 state = rb_entry(node, struct extent_state, rb_node);
1045 if (state->start != cur_start) {
1048 if (!(state->state & EXTENT_DELALLOC)) {
1051 if (state->start >= lock_start) {
1052 if (state->state & EXTENT_LOCKED) {
1054 atomic_inc(&state->refs);
1055 prepare_to_wait(&state->wq, &wait,
1056 TASK_UNINTERRUPTIBLE);
1057 write_unlock_irq(&tree->lock);
1059 write_lock_irq(&tree->lock);
1060 finish_wait(&state->wq, &wait);
1061 free_extent_state(state);
1064 state->state |= EXTENT_LOCKED;
1068 cur_start = state->end + 1;
1069 node = rb_next(node);
1072 total_bytes += state->end - state->start + 1;
1073 if (total_bytes >= max_bytes)
1077 write_unlock_irq(&tree->lock);
1082 * helper function to lock both pages and extents in the tree.
1083 * pages must be locked first.
1085 int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
1087 unsigned long index = start >> PAGE_CACHE_SHIFT;
1088 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1092 while (index <= end_index) {
1093 page = grab_cache_page(tree->mapping, index);
1099 err = PTR_ERR(page);
1104 lock_extent(tree, start, end, GFP_NOFS);
1109 * we failed above in getting the page at 'index', so we undo here
1110 * up to but not including the page at 'index'
1113 index = start >> PAGE_CACHE_SHIFT;
1114 while (index < end_index) {
1115 page = find_get_page(tree->mapping, index);
1117 page_cache_release(page);
1122 EXPORT_SYMBOL(lock_range);
1125 * helper function to unlock both pages and extents in the tree.
1127 int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
1129 unsigned long index = start >> PAGE_CACHE_SHIFT;
1130 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1133 while (index <= end_index) {
1134 page = find_get_page(tree->mapping, index);
1136 page_cache_release(page);
1139 unlock_extent(tree, start, end, GFP_NOFS);
1142 EXPORT_SYMBOL(unlock_range);
1144 int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
1146 struct rb_node *node;
1147 struct extent_state *state;
1150 write_lock_irq(&tree->lock);
1152 * this search will find all the extents that end after our range starts.
1155 node = tree_search(&tree->state, start);
1156 if (!node || IS_ERR(node)) {
1160 state = rb_entry(node, struct extent_state, rb_node);
1161 if (state->start != start) {
1165 state->private = private;
1167 write_unlock_irq(&tree->lock);
1171 int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
1173 struct rb_node *node;
1174 struct extent_state *state;
1177 read_lock_irq(&tree->lock);
1179 * this search will find all the extents that end after our range starts.
1182 node = tree_search(&tree->state, start);
1183 if (!node || IS_ERR(node)) {
1187 state = rb_entry(node, struct extent_state, rb_node);
1188 if (state->start != start) {
1192 *private = state->private;
1194 read_unlock_irq(&tree->lock);
1199 * searches a range in the state tree for a given mask.
1200 * If 'filled' == 1, this returns 1 only if every extent in the range
1201 * has the bits set. Otherwise, 1 is returned if any bit in the
1202 * range is found set.
1204 int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
1205 int bits, int filled)
1207 struct extent_state *state = NULL;
1208 struct rb_node *node;
1211 read_lock_irq(&tree->lock);
1212 node = tree_search(&tree->state, start);
1213 while (node && start <= end) {
1214 state = rb_entry(node, struct extent_state, rb_node);
1215 if (state->start > end)
1218 if (filled && state->start > start) {
1222 if (state->state & bits) {
1226 } else if (filled) {
1230 start = state->end + 1;
1233 node = rb_next(node);
1235 read_unlock_irq(&tree->lock);
1238 EXPORT_SYMBOL(test_range_bit);
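/*
 * The two helpers right below show both modes of test_range_bit():
 * check_page_uptodate() passes filled == 1, so every byte of the page must
 * be covered by EXTENT_UPTODATE state before the page is marked uptodate,
 * while check_page_locked() passes filled == 0, so one remaining
 * EXTENT_LOCKED record anywhere in the page keeps the page locked.
 */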
1241 * helper function to set a given page up to date if all the
1242 * extents in the tree for that page are up to date
1244 static int check_page_uptodate(struct extent_map_tree *tree,
1247 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1248 u64 end = start + PAGE_CACHE_SIZE - 1;
1249 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1250 SetPageUptodate(page);
1255 * helper function to unlock a page if all the extents in the tree
1256 * for that page are unlocked
1258 static int check_page_locked(struct extent_map_tree *tree,
1261 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1262 u64 end = start + PAGE_CACHE_SIZE - 1;
1263 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1269 * helper function to end page writeback if all the extents
1270 * in the tree for that page are done with writeback
1272 static int check_page_writeback(struct extent_map_tree *tree,
1275 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1276 u64 end = start + PAGE_CACHE_SIZE - 1;
1277 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1278 end_page_writeback(page);
1282 /* lots and lots of room for performance fixes in the end_bio funcs */
1285 * after a writepage IO is done, we need to:
1286 * clear the uptodate bits on error
1287 * clear the writeback bits in the extent tree for this IO
1288 * end_page_writeback if the page has no more pending IO
1290 * Scheduling is not allowed, so the extent state tree is expected
1291 * to have one and only one object corresponding to this IO.
1293 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1294 static void end_bio_extent_writepage(struct bio *bio, int err)
1296 static int end_bio_extent_writepage(struct bio *bio,
1297 unsigned int bytes_done, int err)
1300 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1301 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1302 struct extent_map_tree *tree = bio->bi_private;
1307 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1313 struct page *page = bvec->bv_page;
1314 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1316 end = start + bvec->bv_len - 1;
1318 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1323 if (--bvec >= bio->bi_io_vec)
1324 prefetchw(&bvec->bv_page->flags);
1327 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1328 ClearPageUptodate(page);
1331 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1334 end_page_writeback(page);
1336 check_page_writeback(tree, page);
1337 if (tree->ops && tree->ops->writepage_end_io_hook)
1338 tree->ops->writepage_end_io_hook(page, start, end);
1339 } while (bvec >= bio->bi_io_vec);
1342 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1348 * after a readpage IO is done, we need to:
1349 * clear the uptodate bits on error
1350 * set the uptodate bits if things worked
1351 * set the page up to date if all extents in the tree are uptodate
1352 * clear the lock bit in the extent tree
1353 * unlock the page if there are no other extents locked for it
1355 * Scheduling is not allowed, so the extent state tree is expected
1356 * to have one and only one object corresponding to this IO.
1358 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1359 static void end_bio_extent_readpage(struct bio *bio, int err)
1361 static int end_bio_extent_readpage(struct bio *bio,
1362 unsigned int bytes_done, int err)
1365 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1366 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1367 struct extent_map_tree *tree = bio->bi_private;
1373 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1379 struct page *page = bvec->bv_page;
1380 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1382 end = start + bvec->bv_len - 1;
1384 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1389 if (--bvec >= bio->bi_io_vec)
1390 prefetchw(&bvec->bv_page->flags);
1392 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1393 ret = tree->ops->readpage_end_io_hook(page, start, end);
1398 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1400 SetPageUptodate(page);
1402 check_page_uptodate(tree, page);
1404 ClearPageUptodate(page);
1408 unlock_extent(tree, start, end, GFP_ATOMIC);
1413 check_page_locked(tree, page);
1414 } while (bvec >= bio->bi_io_vec);
1417 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1423 * IO done from prepare_write is pretty simple, we just unlock
1424 * the structs in the extent tree when done, and set the uptodate bits
1427 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1428 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1430 static int end_bio_extent_preparewrite(struct bio *bio,
1431 unsigned int bytes_done, int err)
1434 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1435 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1436 struct extent_map_tree *tree = bio->bi_private;
1440 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1446 struct page *page = bvec->bv_page;
1447 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1449 end = start + bvec->bv_len - 1;
1451 if (--bvec >= bio->bi_io_vec)
1452 prefetchw(&bvec->bv_page->flags);
1455 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1457 ClearPageUptodate(page);
1461 unlock_extent(tree, start, end, GFP_ATOMIC);
1463 } while (bvec >= bio->bi_io_vec);
1466 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1472 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1477 bio = bio_alloc(gfp_flags, nr_vecs);
1479 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1480 while (!bio && (nr_vecs /= 2))
1481 bio = bio_alloc(gfp_flags, nr_vecs);
1485 bio->bi_bdev = bdev;
1486 bio->bi_sector = first_sector;
1491 static int submit_one_bio(int rw, struct bio *bio)
1495 submit_bio(rw, bio);
1496 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1502 static int submit_extent_page(int rw, struct extent_map_tree *tree,
1503 struct page *page, sector_t sector,
1504 size_t size, unsigned long offset,
1505 struct block_device *bdev,
1506 struct bio **bio_ret,
1507 unsigned long max_pages,
1508 bio_end_io_t end_io_func)
1514 if (bio_ret && *bio_ret) {
1516 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1517 bio_add_page(bio, page, size, offset) < size) {
1518 ret = submit_one_bio(rw, bio);
1524 nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
1525 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1527 printk("failed to allocate bio nr %d\n", nr);
1529 bio_add_page(bio, page, size, offset);
1530 bio->bi_end_io = end_io_func;
1531 bio->bi_private = tree;
1535 ret = submit_one_bio(rw, bio);
1541 void set_page_extent_mapped(struct page *page)
1543 if (!PagePrivate(page)) {
1544 SetPagePrivate(page);
1545 WARN_ON(!page->mapping->a_ops->invalidatepage);
1546 set_page_private(page, EXTENT_PAGE_PRIVATE);
1547 page_cache_get(page);
1552 * basic readpage implementation. Locked extent state structs are inserted
1553 * into the tree that are removed when the IO is done (by the end_io handlers).
1556 static int __extent_read_full_page(struct extent_map_tree *tree,
1558 get_extent_t *get_extent,
1561 struct inode *inode = page->mapping->host;
1562 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1563 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1567 u64 last_byte = i_size_read(inode);
1571 struct extent_map *em;
1572 struct block_device *bdev;
1575 size_t page_offset = 0;
1577 size_t blocksize = inode->i_sb->s_blocksize;
1579 set_page_extent_mapped(page);
1582 lock_extent(tree, start, end, GFP_NOFS);
1584 while (cur <= end) {
1585 if (cur >= last_byte) {
1586 iosize = PAGE_CACHE_SIZE - page_offset;
1587 zero_user_page(page, page_offset, iosize, KM_USER0);
1588 set_extent_uptodate(tree, cur, cur + iosize - 1,
1590 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1593 em = get_extent(inode, page, page_offset, cur, end, 0);
1594 if (IS_ERR(em) || !em) {
1596 unlock_extent(tree, cur, end, GFP_NOFS);
1600 extent_offset = cur - em->start;
1601 BUG_ON(em->end < cur);
1604 iosize = min(em->end - cur, end - cur) + 1;
1605 cur_end = min(em->end, end);
1606 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1607 sector = (em->block_start + extent_offset) >> 9;
1609 block_start = em->block_start;
1610 free_extent_map(em);
1613 /* we've found a hole, just zero and go on */
1614 if (block_start == EXTENT_MAP_HOLE) {
1615 zero_user_page(page, page_offset, iosize, KM_USER0);
1616 set_extent_uptodate(tree, cur, cur + iosize - 1,
1618 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1620 page_offset += iosize;
1623 /* the get_extent function already copied into the page */
1624 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1625 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1627 page_offset += iosize;
1632 if (tree->ops && tree->ops->readpage_io_hook) {
1633 ret = tree->ops->readpage_io_hook(page, cur,
1637 unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1639 ret = submit_extent_page(READ, tree, page,
1640 sector, iosize, page_offset,
1642 end_bio_extent_readpage);
1647 page_offset += iosize;
1651 if (!PageError(page))
1652 SetPageUptodate(page);
1658 int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1659 get_extent_t *get_extent)
1661 struct bio *bio = NULL;
1664 ret = __extent_read_full_page(tree, page, get_extent, &bio);
1666 submit_one_bio(READ, bio);
1669 EXPORT_SYMBOL(extent_read_full_page);
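/*
 * Summary of the read path above: the full page range is locked, then each
 * blocksize-aligned chunk is either zeroed (holes and reads past i_size),
 * skipped (already EXTENT_UPTODATE, e.g. inline data copied by get_extent),
 * or submitted through submit_extent_page(); for submitted chunks the
 * per-range locks are dropped by end_bio_extent_readpage() as IO completes.
 */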
1672 * the writepage semantics are similar to regular writepage. extent
1673 * records are inserted to lock ranges in the tree, and as dirty areas
1674 * are found, they are marked writeback. Then the lock bits are removed
1675 * and the end_io handler clears the writeback ranges
1677 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1680 struct inode *inode = page->mapping->host;
1681 struct extent_page_data *epd = data;
1682 struct extent_map_tree *tree = epd->tree;
1683 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1684 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1688 u64 last_byte = i_size_read(inode);
1692 struct extent_map *em;
1693 struct block_device *bdev;
1696 size_t page_offset = 0;
1698 loff_t i_size = i_size_read(inode);
1699 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1703 WARN_ON(!PageLocked(page));
1704 if (page->index > end_index) {
1705 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1710 if (page->index == end_index) {
1711 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1712 zero_user_page(page, offset,
1713 PAGE_CACHE_SIZE - offset, KM_USER0);
1716 set_page_extent_mapped(page);
1718 lock_extent(tree, start, page_end, GFP_NOFS);
1719 nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
1723 tree->ops->fill_delalloc(inode, start, delalloc_end);
1724 if (delalloc_end >= page_end + 1) {
1725 clear_extent_bit(tree, page_end + 1, delalloc_end,
1726 EXTENT_LOCKED | EXTENT_DELALLOC,
1729 clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
1731 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1732 printk("found delalloc bits after clear extent_bit\n");
1734 } else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1735 printk("found delalloc bits after find_delalloc_range returns 0\n");
1739 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1740 printk("found delalloc bits after lock_extent\n");
1743 if (last_byte <= start) {
1744 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1748 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1749 blocksize = inode->i_sb->s_blocksize;
1751 while (cur <= end) {
1752 if (cur >= last_byte) {
1753 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1756 em = epd->get_extent(inode, page, page_offset, cur, end, 1);
1757 if (IS_ERR(em) || !em) {
1762 extent_offset = cur - em->start;
1763 BUG_ON(em->end < cur);
1765 iosize = min(em->end - cur, end - cur) + 1;
1766 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1767 sector = (em->block_start + extent_offset) >> 9;
1769 block_start = em->block_start;
1770 free_extent_map(em);
1773 if (block_start == EXTENT_MAP_HOLE ||
1774 block_start == EXTENT_MAP_INLINE) {
1775 clear_extent_dirty(tree, cur,
1776 cur + iosize - 1, GFP_NOFS);
1778 page_offset += iosize;
1782 /* leave this out until we have a page_mkwrite call */
1783 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1786 page_offset += iosize;
1789 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1790 if (tree->ops && tree->ops->writepage_io_hook) {
1791 ret = tree->ops->writepage_io_hook(page, cur,
1799 unsigned long nr = end_index + 1;
1800 set_range_writeback(tree, cur, cur + iosize - 1);
1802 ret = submit_extent_page(WRITE, tree, page, sector,
1803 iosize, page_offset, bdev,
1805 end_bio_extent_writepage);
1810 page_offset += iosize;
1814 unlock_extent(tree, start, page_end, GFP_NOFS);
1819 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
1820 get_extent_t *get_extent,
1821 struct writeback_control *wbc)
1824 struct extent_page_data epd = {
1827 .get_extent = get_extent,
1830 ret = __extent_writepage(page, wbc, &epd);
1832 submit_one_bio(WRITE, epd.bio);
1835 EXPORT_SYMBOL(extent_write_full_page);
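/*
 * Summary of the write path above: delalloc ranges overlapping the page are
 * handed to the fill_delalloc hook first, then every dirty chunk that is
 * neither a hole nor inline has EXTENT_DIRTY cleared, is marked writeback
 * through set_range_writeback(), and is submitted with
 * end_bio_extent_writepage() as the completion handler, which clears
 * EXTENT_WRITEBACK again when the IO finishes.
 */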
1837 int extent_writepages(struct extent_map_tree *tree,
1838 struct address_space *mapping,
1839 get_extent_t *get_extent,
1840 struct writeback_control *wbc)
1843 struct extent_page_data epd = {
1846 .get_extent = get_extent,
1849 ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
1851 submit_one_bio(WRITE, epd.bio);
1854 EXPORT_SYMBOL(extent_writepages);
1856 int extent_readpages(struct extent_map_tree *tree,
1857 struct address_space *mapping,
1858 struct list_head *pages, unsigned nr_pages,
1859 get_extent_t get_extent)
1861 struct bio *bio = NULL;
1863 struct pagevec pvec;
1865 pagevec_init(&pvec, 0);
1866 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
1867 struct page *page = list_entry(pages->prev, struct page, lru);
1869 prefetchw(&page->flags);
1870 list_del(&page->lru);
1872 * what we want to do here is call add_to_page_cache_lru,
1873 * but that isn't exported, so we reproduce it here
1875 if (!add_to_page_cache(page, mapping,
1876 page->index, GFP_KERNEL)) {
1878 /* open coding of lru_cache_add, also not exported */
1879 page_cache_get(page);
1880 if (!pagevec_add(&pvec, page))
1881 __pagevec_lru_add(&pvec);
1882 __extent_read_full_page(tree, page, get_extent, &bio);
1884 page_cache_release(page);
1886 if (pagevec_count(&pvec))
1887 __pagevec_lru_add(&pvec);
1888 BUG_ON(!list_empty(pages));
1890 submit_one_bio(READ, bio);
1893 EXPORT_SYMBOL(extent_readpages);
1896 * basic invalidatepage code, this waits on any locked or writeback
1897 * ranges corresponding to the page, and then deletes any extent state
1898 * records from the tree
1900 int extent_invalidatepage(struct extent_map_tree *tree,
1901 struct page *page, unsigned long offset)
1903 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
1904 u64 end = start + PAGE_CACHE_SIZE - 1;
1905 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
1907 start += (offset + blocksize -1) & ~(blocksize - 1);
1911 lock_extent(tree, start, end, GFP_NOFS);
1912 wait_on_extent_writeback(tree, start, end);
1913 clear_extent_bit(tree, start, end,
1914 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
1918 EXPORT_SYMBOL(extent_invalidatepage);
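/*
 * Worked example of the rounding above (illustrative, assuming a 4K block
 * size): a partial invalidate starting at offset 100 into the page advances
 * start by (100 + 4095) & ~4095 == 4096, so only blocks lying entirely
 * inside the invalidated region have their state cleared; offset 0 leaves
 * start untouched and the whole page range is torn down.
 */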
1921 * simple commit_write call, set_range_dirty is used to mark both
1922 * the pages and the extent records as dirty
1924 int extent_commit_write(struct extent_map_tree *tree,
1925 struct inode *inode, struct page *page,
1926 unsigned from, unsigned to)
1928 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1930 set_page_extent_mapped(page);
1931 set_page_dirty(page);
1933 if (pos > inode->i_size) {
1934 i_size_write(inode, pos);
1935 mark_inode_dirty(inode);
1939 EXPORT_SYMBOL(extent_commit_write);
1941 int extent_prepare_write(struct extent_map_tree *tree,
1942 struct inode *inode, struct page *page,
1943 unsigned from, unsigned to, get_extent_t *get_extent)
1945 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1946 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
1948 u64 orig_block_start;
1951 struct extent_map *em;
1952 unsigned blocksize = 1 << inode->i_blkbits;
1953 size_t page_offset = 0;
1954 size_t block_off_start;
1955 size_t block_off_end;
1961 set_page_extent_mapped(page);
1963 block_start = (page_start + from) & ~((u64)blocksize - 1);
1964 block_end = (page_start + to - 1) | (blocksize - 1);
1965 orig_block_start = block_start;
1967 lock_extent(tree, page_start, page_end, GFP_NOFS);
1968 while(block_start <= block_end) {
1969 em = get_extent(inode, page, page_offset, block_start,
1971 if (IS_ERR(em) || !em) {
1974 cur_end = min(block_end, em->end);
1975 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
1976 block_off_end = block_off_start + blocksize;
1977 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
1979 if (!PageUptodate(page) && isnew &&
1980 (block_off_end > to || block_off_start < from)) {
1983 kaddr = kmap_atomic(page, KM_USER0);
1984 if (block_off_end > to)
1985 memset(kaddr + to, 0, block_off_end - to);
1986 if (block_off_start < from)
1987 memset(kaddr + block_off_start, 0,
1988 from - block_off_start);
1989 flush_dcache_page(page);
1990 kunmap_atomic(kaddr, KM_USER0);
1992 if (!isnew && !PageUptodate(page) &&
1993 (block_off_end > to || block_off_start < from) &&
1994 !test_range_bit(tree, block_start, cur_end,
1995 EXTENT_UPTODATE, 1)) {
1997 u64 extent_offset = block_start - em->start;
1999 sector = (em->block_start + extent_offset) >> 9;
2000 iosize = (cur_end - block_start + blocksize - 1) &
2001 ~((u64)blocksize - 1);
2003 * we've already got the extent locked, but we
2004 * need to split the state such that our end_bio
2005 * handler can clear the lock.
2007 set_extent_bit(tree, block_start,
2008 block_start + iosize - 1,
2009 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2010 ret = submit_extent_page(READ, tree, page,
2011 sector, iosize, page_offset, em->bdev,
2013 end_bio_extent_preparewrite);
2015 block_start = block_start + iosize;
2017 set_extent_uptodate(tree, block_start, cur_end,
2019 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2020 block_start = cur_end + 1;
2022 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2023 free_extent_map(em);
2026 wait_extent_bit(tree, orig_block_start,
2027 block_end, EXTENT_LOCKED);
2029 check_page_uptodate(tree, page);
2031 /* FIXME, zero out newly allocated blocks on error */
2034 EXPORT_SYMBOL(extent_prepare_write);
2037 * a helper for releasepage. As long as there are no locked extents
2038 * in the range corresponding to the page, both state records and extent
2039 * map records are removed
2041 int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
2043 struct extent_map *em;
2044 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2045 u64 end = start + PAGE_CACHE_SIZE - 1;
2046 u64 orig_start = start;
2049 while (start <= end) {
2050 em = lookup_extent_mapping(tree, start, end);
2051 if (!em || IS_ERR(em))
2053 if (!test_range_bit(tree, em->start, em->end,
2054 EXTENT_LOCKED, 0)) {
2055 remove_extent_mapping(tree, em);
2056 /* once for the rb tree */
2057 free_extent_map(em);
2059 start = em->end + 1;
2061 free_extent_map(em);
2063 if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
2066 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
2070 EXPORT_SYMBOL(try_release_extent_mapping);
2072 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2073 get_extent_t *get_extent)
2075 struct inode *inode = mapping->host;
2076 u64 start = iblock << inode->i_blkbits;
2077 u64 end = start + (1 << inode->i_blkbits) - 1;
2078 sector_t sector = 0;
2079 struct extent_map *em;
2081 em = get_extent(inode, NULL, 0, start, end, 0);
2082 if (!em || IS_ERR(em))
2085 if (em->block_start == EXTENT_MAP_INLINE ||
2086 em->block_start == EXTENT_MAP_HOLE)
2089 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2091 free_extent_map(em);
2095 static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
2097 if (list_empty(&eb->lru)) {
2098 extent_buffer_get(eb);
2099 list_add(&eb->lru, &tree->buffer_lru);
2101 if (tree->lru_size >= BUFFER_LRU_MAX) {
2102 struct extent_buffer *rm;
2103 rm = list_entry(tree->buffer_lru.prev,
2104 struct extent_buffer, lru);
2106 list_del_init(&rm->lru);
2107 free_extent_buffer(rm);
2110 list_move(&eb->lru, &tree->buffer_lru);
2113 static struct extent_buffer *find_lru(struct extent_map_tree *tree,
2114 u64 start, unsigned long len)
2116 struct list_head *lru = &tree->buffer_lru;
2117 struct list_head *cur = lru->next;
2118 struct extent_buffer *eb;
2120 if (list_empty(lru))
2124 eb = list_entry(cur, struct extent_buffer, lru);
2125 if (eb->start == start && eb->len == len) {
2126 extent_buffer_get(eb);
2130 } while (cur != lru);
2134 static inline unsigned long num_extent_pages(u64 start, u64 len)
2136 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2137 (start >> PAGE_CACHE_SHIFT);
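/*
 * Worked example (illustrative, assuming 4K pages): a buffer of len 4096
 * starting at byte 6144 touches pages 1 and 2, and indeed
 * ((6144 + 4096 + 4095) >> 12) - (6144 >> 12) == 3 - 1 == 2.
 */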
2140 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2144 struct address_space *mapping;
2147 return eb->first_page;
2148 i += eb->start >> PAGE_CACHE_SHIFT;
2149 mapping = eb->first_page->mapping;
2150 read_lock_irq(&mapping->tree_lock);
2151 p = radix_tree_lookup(&mapping->page_tree, i);
2152 read_unlock_irq(&mapping->tree_lock);
2156 static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2161 struct extent_buffer *eb = NULL;
2163 spin_lock(&tree->lru_lock);
2164 eb = find_lru(tree, start, len);
2165 spin_unlock(&tree->lru_lock);
2170 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2171 INIT_LIST_HEAD(&eb->lru);
2174 atomic_set(&eb->refs, 1);
2179 static void __free_extent_buffer(struct extent_buffer *eb)
2181 kmem_cache_free(extent_buffer_cache, eb);
2184 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2185 u64 start, unsigned long len,
2189 unsigned long num_pages = num_extent_pages(start, len);
2191 unsigned long index = start >> PAGE_CACHE_SHIFT;
2192 struct extent_buffer *eb;
2194 struct address_space *mapping = tree->mapping;
2197 eb = __alloc_extent_buffer(tree, start, len, mask);
2198 if (!eb || IS_ERR(eb))
2201 if (eb->flags & EXTENT_BUFFER_FILLED)
2205 eb->first_page = page0;
2208 page_cache_get(page0);
2209 mark_page_accessed(page0);
2210 set_page_extent_mapped(page0);
2211 set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2216 for (; i < num_pages; i++, index++) {
2217 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2222 set_page_extent_mapped(p);
2223 mark_page_accessed(p);
2226 set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2229 set_page_private(p, EXTENT_PAGE_PRIVATE);
2231 if (!PageUptodate(p))
2236 eb->flags |= EXTENT_UPTODATE;
2237 eb->flags |= EXTENT_BUFFER_FILLED;
2240 spin_lock(&tree->lru_lock);
2242 spin_unlock(&tree->lru_lock);
2246 spin_lock(&tree->lru_lock);
2247 list_del_init(&eb->lru);
2248 spin_unlock(&tree->lru_lock);
2249 if (!atomic_dec_and_test(&eb->refs))
2251 for (index = 0; index < i; index++) {
2252 page_cache_release(extent_buffer_page(eb, index));
2254 __free_extent_buffer(eb);
2257 EXPORT_SYMBOL(alloc_extent_buffer);
2259 struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2260 u64 start, unsigned long len,
2263 unsigned long num_pages = num_extent_pages(start, len);
2265 unsigned long index = start >> PAGE_CACHE_SHIFT;
2266 struct extent_buffer *eb;
2268 struct address_space *mapping = tree->mapping;
2271 eb = __alloc_extent_buffer(tree, start, len, mask);
2272 if (!eb || IS_ERR(eb))
2275 if (eb->flags & EXTENT_BUFFER_FILLED)
2278 for (i = 0; i < num_pages; i++, index++) {
2279 p = find_lock_page(mapping, index);
2283 set_page_extent_mapped(p);
2284 mark_page_accessed(p);
2288 set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2291 set_page_private(p, EXTENT_PAGE_PRIVATE);
2294 if (!PageUptodate(p))
2299 eb->flags |= EXTENT_UPTODATE;
2300 eb->flags |= EXTENT_BUFFER_FILLED;
2303 spin_lock(&tree->lru_lock);
2305 spin_unlock(&tree->lru_lock);
2308 spin_lock(&tree->lru_lock);
2309 list_del_init(&eb->lru);
2310 spin_unlock(&tree->lru_lock);
2311 if (!atomic_dec_and_test(&eb->refs))
2313 for (index = 0; index < i; index++) {
2314 page_cache_release(extent_buffer_page(eb, index));
2316 __free_extent_buffer(eb);
2319 EXPORT_SYMBOL(find_extent_buffer);
2321 void free_extent_buffer(struct extent_buffer *eb)
2324 unsigned long num_pages;
2329 if (!atomic_dec_and_test(&eb->refs))
2332 num_pages = num_extent_pages(eb->start, eb->len);
2334 for (i = 0; i < num_pages; i++) {
2335 page_cache_release(extent_buffer_page(eb, i));
2337 __free_extent_buffer(eb);
2339 EXPORT_SYMBOL(free_extent_buffer);
2341 int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2342 struct extent_buffer *eb)
2346 unsigned long num_pages;
2349 u64 start = eb->start;
2350 u64 end = start + eb->len - 1;
2352 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2353 num_pages = num_extent_pages(eb->start, eb->len);
2355 for (i = 0; i < num_pages; i++) {
2356 page = extent_buffer_page(eb, i);
2359 * if we're on the last page or the first page and the
2360 * block isn't aligned on a page boundary, do extra checks
2361 * to make sure we don't clean a page that is partially dirty
2363 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2364 ((i == num_pages - 1) &&
2365 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2366 start = (u64)page->index << PAGE_CACHE_SHIFT;
2367 end = start + PAGE_CACHE_SIZE - 1;
2368 if (test_range_bit(tree, start, end,
2374 clear_page_dirty_for_io(page);
2379 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2381 int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2382 struct extent_buffer *eb)
2384 return wait_on_extent_writeback(tree, eb->start,
2385 eb->start + eb->len - 1);
2387 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2389 int set_extent_buffer_dirty(struct extent_map_tree *tree,
2390 struct extent_buffer *eb)
2393 unsigned long num_pages;
2395 num_pages = num_extent_pages(eb->start, eb->len);
2396 for (i = 0; i < num_pages; i++) {
2397 struct page *page = extent_buffer_page(eb, i);
2398 /* writepage may need to do something special for the
2399 * first page, we have to make sure page->private is
2400 * properly set. releasepage may drop page->private
2401 * on us if the page isn't already dirty.
2405 set_page_private(page,
2406 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2409 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2413 return set_extent_dirty(tree, eb->start,
2414 eb->start + eb->len - 1, GFP_NOFS);
2416 EXPORT_SYMBOL(set_extent_buffer_dirty);
2418 int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2419 struct extent_buffer *eb)
2423 unsigned long num_pages;
2425 num_pages = num_extent_pages(eb->start, eb->len);
2427 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2429 for (i = 0; i < num_pages; i++) {
2430 page = extent_buffer_page(eb, i);
2431 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2432 ((i == num_pages - 1) &&
2433 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2434 check_page_uptodate(tree, page);
2437 SetPageUptodate(page);
2441 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2443 int extent_buffer_uptodate(struct extent_map_tree *tree,
2444 struct extent_buffer *eb)
2446 if (eb->flags & EXTENT_UPTODATE)
2448 return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2449 EXTENT_UPTODATE, 1);
2451 EXPORT_SYMBOL(extent_buffer_uptodate);
2453 int read_extent_buffer_pages(struct extent_map_tree *tree,
2454 struct extent_buffer *eb,
2459 unsigned long start_i;
2463 unsigned long num_pages;
2465 if (eb->flags & EXTENT_UPTODATE)
2468 if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2469 EXTENT_UPTODATE, 1)) {
2473 WARN_ON(start < eb->start);
2474 start_i = (start >> PAGE_CACHE_SHIFT) -
2475 (eb->start >> PAGE_CACHE_SHIFT);
2480 num_pages = num_extent_pages(eb->start, eb->len);
2481 for (i = start_i; i < num_pages; i++) {
2482 page = extent_buffer_page(eb, i);
2483 if (PageUptodate(page)) {
2487 if (TestSetPageLocked(page)) {
2493 if (!PageUptodate(page)) {
2494 err = page->mapping->a_ops->readpage(NULL, page);
2507 for (i = start_i; i < num_pages; i++) {
2508 page = extent_buffer_page(eb, i);
2509 wait_on_page_locked(page);
2510 if (!PageUptodate(page)) {
2515 eb->flags |= EXTENT_UPTODATE;
2518 EXPORT_SYMBOL(read_extent_buffer_pages);
2520 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2521 unsigned long start,
2528 char *dst = (char *)dstv;
2529 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2530 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2531 unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2533 WARN_ON(start > eb->len);
2534 WARN_ON(start + len > eb->start + eb->len);
2536 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2539 page = extent_buffer_page(eb, i);
2540 if (!PageUptodate(page)) {
2541 printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2544 WARN_ON(!PageUptodate(page));
2546 cur = min(len, (PAGE_CACHE_SIZE - offset));
2547 kaddr = kmap_atomic(page, KM_USER1);
2548 memcpy(dst, kaddr + offset, cur);
2549 kunmap_atomic(kaddr, KM_USER1);
2557 EXPORT_SYMBOL(read_extent_buffer);
2559 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2560 unsigned long min_len, char **token, char **map,
2561 unsigned long *map_start,
2562 unsigned long *map_len, int km)
2564 size_t offset = start & (PAGE_CACHE_SIZE - 1);
2567 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2568 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2569 unsigned long end_i = (start_offset + start + min_len - 1) >>
2576 offset = start_offset;
2580 *map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
2582 if (start + min_len > eb->len) {
2583 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
2587 p = extent_buffer_page(eb, i);
2588 WARN_ON(!PageUptodate(p));
2589 kaddr = kmap_atomic(p, km);
2591 *map = kaddr + offset;
2592 *map_len = PAGE_CACHE_SIZE - offset;
2595 EXPORT_SYMBOL(map_private_extent_buffer);
2597 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2598 unsigned long min_len,
2599 char **token, char **map,
2600 unsigned long *map_start,
2601 unsigned long *map_len, int km)
2605 if (eb->map_token) {
2606 unmap_extent_buffer(eb, eb->map_token, km);
2607 eb->map_token = NULL;
2610 err = map_private_extent_buffer(eb, start, min_len, token, map,
2611 map_start, map_len, km);
2613 eb->map_token = *token;
2615 eb->map_start = *map_start;
2616 eb->map_len = *map_len;
2620 EXPORT_SYMBOL(map_extent_buffer);
2622 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2624 kunmap_atomic(token, km);
2626 EXPORT_SYMBOL(unmap_extent_buffer);
2628 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2629 unsigned long start,
2636 char *ptr = (char *)ptrv;
2637 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2638 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2641 WARN_ON(start > eb->len);
2642 WARN_ON(start + len > eb->start + eb->len);
2644 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2647 page = extent_buffer_page(eb, i);
2648 WARN_ON(!PageUptodate(page));
2650 cur = min(len, (PAGE_CACHE_SIZE - offset));
2652 kaddr = kmap_atomic(page, KM_USER0);
2653 ret = memcmp(ptr, kaddr + offset, cur);
2654 kunmap_atomic(kaddr, KM_USER0);
2665 EXPORT_SYMBOL(memcmp_extent_buffer);
2667 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2668 unsigned long start, unsigned long len)
2674 char *src = (char *)srcv;
2675 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2676 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2678 WARN_ON(start > eb->len);
2679 WARN_ON(start + len > eb->start + eb->len);
2681 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2684 page = extent_buffer_page(eb, i);
2685 WARN_ON(!PageUptodate(page));
2687 cur = min(len, PAGE_CACHE_SIZE - offset);
2688 kaddr = kmap_atomic(page, KM_USER1);
2689 memcpy(kaddr + offset, src, cur);
2690 kunmap_atomic(kaddr, KM_USER1);
2698 EXPORT_SYMBOL(write_extent_buffer);
2700 void memset_extent_buffer(struct extent_buffer *eb, char c,
2701 unsigned long start, unsigned long len)
2707 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2708 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2710 WARN_ON(start > eb->len);
2711 WARN_ON(start + len > eb->start + eb->len);
2713 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2716 page = extent_buffer_page(eb, i);
2717 WARN_ON(!PageUptodate(page));
2719 cur = min(len, PAGE_CACHE_SIZE - offset);
2720 kaddr = kmap_atomic(page, KM_USER0);
2721 memset(kaddr + offset, c, cur);
2722 kunmap_atomic(kaddr, KM_USER0);
2729 EXPORT_SYMBOL(memset_extent_buffer);
2731 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
2732 unsigned long dst_offset, unsigned long src_offset,
2735 u64 dst_len = dst->len;
2740 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2741 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2743 WARN_ON(src->len != dst_len);
2745 offset = (start_offset + dst_offset) &
2746 ((unsigned long)PAGE_CACHE_SIZE - 1);
2749 page = extent_buffer_page(dst, i);
2750 WARN_ON(!PageUptodate(page));
2752 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
2754 kaddr = kmap_atomic(page, KM_USER0);
2755 read_extent_buffer(src, kaddr + offset, src_offset, cur);
2756 kunmap_atomic(kaddr, KM_USER0);
2764 EXPORT_SYMBOL(copy_extent_buffer);
2766 static void move_pages(struct page *dst_page, struct page *src_page,
2767 unsigned long dst_off, unsigned long src_off,
2770 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2771 if (dst_page == src_page) {
2772 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
2774 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
2775 char *p = dst_kaddr + dst_off + len;
2776 char *s = src_kaddr + src_off + len;
2781 kunmap_atomic(src_kaddr, KM_USER1);
2783 kunmap_atomic(dst_kaddr, KM_USER0);
2786 static void copy_pages(struct page *dst_page, struct page *src_page,
2787 unsigned long dst_off, unsigned long src_off,
2790 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2793 if (dst_page != src_page)
2794 src_kaddr = kmap_atomic(src_page, KM_USER1);
2796 src_kaddr = dst_kaddr;
2798 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
2799 kunmap_atomic(dst_kaddr, KM_USER0);
2800 if (dst_page != src_page)
2801 kunmap_atomic(src_kaddr, KM_USER1);
2804 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2805 unsigned long src_offset, unsigned long len)
2808 size_t dst_off_in_page;
2809 size_t src_off_in_page;
2810 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2811 unsigned long dst_i;
2812 unsigned long src_i;
2814 if (src_offset + len > dst->len) {
2815 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
2816 src_offset, len, dst->len);
2819 if (dst_offset + len > dst->len) {
2820 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
2821 dst_offset, len, dst->len);
2826 dst_off_in_page = (start_offset + dst_offset) &
2827 ((unsigned long)PAGE_CACHE_SIZE - 1);
2828 src_off_in_page = (start_offset + src_offset) &
2829 ((unsigned long)PAGE_CACHE_SIZE - 1);
2831 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2832 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
2834 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
2836 cur = min_t(unsigned long, cur,
2837 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
2839 copy_pages(extent_buffer_page(dst, dst_i),
2840 extent_buffer_page(dst, src_i),
2841 dst_off_in_page, src_off_in_page, cur);
2848 EXPORT_SYMBOL(memcpy_extent_buffer);
2850 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2851 unsigned long src_offset, unsigned long len)
2854 size_t dst_off_in_page;
2855 size_t src_off_in_page;
2856 unsigned long dst_end = dst_offset + len - 1;
2857 unsigned long src_end = src_offset + len - 1;
2858 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2859 unsigned long dst_i;
2860 unsigned long src_i;
2862 if (src_offset + len > dst->len) {
2863 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
2864 src_offset, len, dst->len);
2867 if (dst_offset + len > dst->len) {
2868 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
2869 dst_offset, len, dst->len);
2872 if (dst_offset < src_offset) {
2873 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
2877 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
2878 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
2880 dst_off_in_page = (start_offset + dst_end) &
2881 ((unsigned long)PAGE_CACHE_SIZE - 1);
2882 src_off_in_page = (start_offset + src_end) &
2883 ((unsigned long)PAGE_CACHE_SIZE - 1);
2885 cur = min_t(unsigned long, len, src_off_in_page + 1);
2886 cur = min(cur, dst_off_in_page + 1);
2887 move_pages(extent_buffer_page(dst, dst_i),
2888 extent_buffer_page(dst, src_i),
2889 dst_off_in_page - cur + 1,
2890 src_off_in_page - cur + 1, cur);
2897 EXPORT_SYMBOL(memmove_extent_buffer);