2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/smp_lock.h>
30 #include <linux/backing-dev.h>
31 #include <linux/mpage.h>
32 #include <linux/swap.h>
33 #include <linux/writeback.h>
34 #include <linux/statfs.h>
35 #include <linux/compat.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/xattr.h>
38 #include <linux/posix_acl.h>
39 #include <linux/falloc.h>
43 #include "transaction.h"
44 #include "btrfs_inode.h"
46 #include "print-tree.h"
48 #include "ordered-data.h"
51 #include "ref-cache.h"
52 #include "compression.h"
55 struct btrfs_iget_args {
57 struct btrfs_root *root;
60 static struct inode_operations btrfs_dir_inode_operations;
61 static struct inode_operations btrfs_symlink_inode_operations;
62 static struct inode_operations btrfs_dir_ro_inode_operations;
63 static struct inode_operations btrfs_special_inode_operations;
64 static struct inode_operations btrfs_file_inode_operations;
65 static struct address_space_operations btrfs_aops;
66 static struct address_space_operations btrfs_symlink_aops;
67 static struct file_operations btrfs_dir_file_operations;
68 static struct extent_io_ops btrfs_extent_io_ops;
70 static struct kmem_cache *btrfs_inode_cachep;
71 struct kmem_cache *btrfs_trans_handle_cachep;
72 struct kmem_cache *btrfs_transaction_cachep;
73 struct kmem_cache *btrfs_bit_radix_cachep;
74 struct kmem_cache *btrfs_path_cachep;
77 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
78 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
79 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
80 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
81 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
82 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
83 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
84 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
87 static void btrfs_truncate(struct inode *inode);
88 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
89 static noinline int cow_file_range(struct inode *inode,
90 struct page *locked_page,
91 u64 start, u64 end, int *page_started,
92 unsigned long *nr_written, int unlock);
94 static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
98 err = btrfs_init_acl(inode, dir);
100 err = btrfs_xattr_security_init(inode, dir);
105 * this does all the hard work for inserting an inline extent into
106 * the btree. The caller should have done a btrfs_drop_extents so that
107 * no overlapping inline items exist in the btree
109 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
110 struct btrfs_root *root, struct inode *inode,
111 u64 start, size_t size, size_t compressed_size,
112 struct page **compressed_pages)
114 struct btrfs_key key;
115 struct btrfs_path *path;
116 struct extent_buffer *leaf;
117 struct page *page = NULL;
120 struct btrfs_file_extent_item *ei;
123 size_t cur_size = size;
125 unsigned long offset;
126 int use_compress = 0;
128 if (compressed_size && compressed_pages) {
130 cur_size = compressed_size;
133 path = btrfs_alloc_path();
137 path->leave_spinning = 1;
138 btrfs_set_trans_block_group(trans, inode);
140 key.objectid = inode->i_ino;
142 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
143 datasize = btrfs_file_extent_calc_inline_size(cur_size);
145 inode_add_bytes(inode, size);
146 ret = btrfs_insert_empty_item(trans, root, path, &key,
153 leaf = path->nodes[0];
154 ei = btrfs_item_ptr(leaf, path->slots[0],
155 struct btrfs_file_extent_item);
156 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
157 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
158 btrfs_set_file_extent_encryption(leaf, ei, 0);
159 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
160 btrfs_set_file_extent_ram_bytes(leaf, ei, size);
161 ptr = btrfs_file_extent_inline_start(ei);
166 while (compressed_size > 0) {
167 cpage = compressed_pages[i];
168 cur_size = min_t(unsigned long, compressed_size,
171 kaddr = kmap_atomic(cpage, KM_USER0);
172 write_extent_buffer(leaf, kaddr, ptr, cur_size);
173 kunmap_atomic(kaddr, KM_USER0);
177 compressed_size -= cur_size;
179 btrfs_set_file_extent_compression(leaf, ei,
180 BTRFS_COMPRESS_ZLIB);
182 page = find_get_page(inode->i_mapping,
183 start >> PAGE_CACHE_SHIFT);
184 btrfs_set_file_extent_compression(leaf, ei, 0);
185 kaddr = kmap_atomic(page, KM_USER0);
186 offset = start & (PAGE_CACHE_SIZE - 1);
187 write_extent_buffer(leaf, kaddr + offset, ptr, size);
188 kunmap_atomic(kaddr, KM_USER0);
189 page_cache_release(page);
191 btrfs_mark_buffer_dirty(leaf);
192 btrfs_free_path(path);
194 BTRFS_I(inode)->disk_i_size = inode->i_size;
195 btrfs_update_inode(trans, root, inode);
198 btrfs_free_path(path);
204 * conditionally insert an inline extent into the file. This
205 * does the checks required to make sure the data is small enough
206 * to fit as an inline extent.
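/*
 * For example (roughly, with a 4K sectorsize and the default max_inline):
 * a 2000 byte write starting at file offset 0 that ends inside the first
 * page can be stored inline, while data that starts past offset 0, reaches
 * PAGE_CACHE_SIZE, or exceeds BTRFS_MAX_INLINE_DATA_SIZE(root) makes this
 * function return nonzero and the caller falls back to regular COW extents.
 */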
208 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
209 struct btrfs_root *root,
210 struct inode *inode, u64 start, u64 end,
211 size_t compressed_size,
212 struct page **compressed_pages)
214 u64 isize = i_size_read(inode);
215 u64 actual_end = min(end + 1, isize);
216 u64 inline_len = actual_end - start;
217 u64 aligned_end = (end + root->sectorsize - 1) &
218 ~((u64)root->sectorsize - 1);
220 u64 data_len = inline_len;
224 data_len = compressed_size;
227 actual_end >= PAGE_CACHE_SIZE ||
228 data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
230 (actual_end & (root->sectorsize - 1)) == 0) ||
232 data_len > root->fs_info->max_inline) {
236 ret = btrfs_drop_extents(trans, root, inode, start,
237 aligned_end, aligned_end, start, &hint_byte);
240 if (isize > actual_end)
241 inline_len = min_t(u64, isize, actual_end);
242 ret = insert_inline_extent(trans, root, inode, start,
243 inline_len, compressed_size,
246 btrfs_drop_extent_cache(inode, start, aligned_end, 0);
250 struct async_extent {
255 unsigned long nr_pages;
256 struct list_head list;
261 struct btrfs_root *root;
262 struct page *locked_page;
265 struct list_head extents;
266 struct btrfs_work work;
269 static noinline int add_async_extent(struct async_cow *cow,
270 u64 start, u64 ram_size,
273 unsigned long nr_pages)
275 struct async_extent *async_extent;
277 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
278 async_extent->start = start;
279 async_extent->ram_size = ram_size;
280 async_extent->compressed_size = compressed_size;
281 async_extent->pages = pages;
282 async_extent->nr_pages = nr_pages;
283 list_add_tail(&async_extent->list, &cow->extents);
288 * we create compressed extents in two phases. The first
289 * phase compresses a range of pages that have already been
290 * locked (both pages and state bits are locked).
292 * This is done inside an ordered work queue, and the compression
293 * is spread across many cpus. The actual IO submission is step
294 * two, and the ordered work queue takes care of making sure that
295 * happens in the same order things were put onto the queue by
296 * writepages and friends.
298 * If this code finds it can't get good compression, it puts an
299 * entry onto the work queue to write the uncompressed bytes. This
300 * makes sure that both compressed inodes and uncompressed inodes
301 * are written in the same order that pdflush sent them down.
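/*
 * A rough sketch of the flow (see async_cow_start/async_cow_submit below):
 * writepages hands a delalloc range to cow_file_range_async(), which queues
 * async_cow work items; work.func (async_cow_start) runs
 * compress_file_range() on whatever cpu is free, and work.ordered_func
 * (async_cow_submit) later runs submit_compressed_extents() strictly in
 * the order the items were queued.
 */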
303 static noinline int compress_file_range(struct inode *inode,
304 struct page *locked_page,
306 struct async_cow *async_cow,
309 struct btrfs_root *root = BTRFS_I(inode)->root;
310 struct btrfs_trans_handle *trans;
314 u64 blocksize = root->sectorsize;
316 u64 isize = i_size_read(inode);
318 struct page **pages = NULL;
319 unsigned long nr_pages;
320 unsigned long nr_pages_ret = 0;
321 unsigned long total_compressed = 0;
322 unsigned long total_in = 0;
323 unsigned long max_compressed = 128 * 1024;
324 unsigned long max_uncompressed = 128 * 1024;
330 actual_end = min_t(u64, isize, end + 1);
333 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
334 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
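/* with 4K pages this clamps a single compression pass to 32 pages (128K) */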
337 * we don't want to send crud past the end of i_size through
338 * compression, that's just a waste of CPU time. So, if the
339 * end of the file is before the start of our current
340 * requested range of bytes, we bail out to the uncompressed
341 * cleanup code that can deal with all of this.
343 * It isn't really the fastest way to fix things, but this is a
344 * very uncommon corner.
346 if (actual_end <= start)
347 goto cleanup_and_bail_uncompressed;
349 total_compressed = actual_end - start;
351 /* we want to make sure that amount of ram required to uncompress
352 * an extent is reasonable, so we limit the total size in ram
353 * of a compressed extent to 128k. This is a crucial number
354 * because it also controls how easily we can spread reads across
355 * cpus for decompression.
357 * We also want to make sure the amount of IO required to do
358 * a random read is reasonably small, so we limit the size of
359 * a compressed extent to 128k.
361 total_compressed = min(total_compressed, max_uncompressed);
362 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
363 num_bytes = max(blocksize, num_bytes);
364 disk_num_bytes = num_bytes;
369 * we do compression for mount -o compress and when the
370 * inode has not been flagged as nocompress. This flag can
371 * change at any time if we discover bad compression ratios.
373 if (!btrfs_test_flag(inode, NOCOMPRESS) &&
374 btrfs_test_opt(root, COMPRESS)) {
376 pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
378 ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
379 total_compressed, pages,
380 nr_pages, &nr_pages_ret,
386 unsigned long offset = total_compressed &
387 (PAGE_CACHE_SIZE - 1);
388 struct page *page = pages[nr_pages_ret - 1];
391 /* zero the tail end of the last page, we might be
392 * sending it down to disk
395 kaddr = kmap_atomic(page, KM_USER0);
396 memset(kaddr + offset, 0,
397 PAGE_CACHE_SIZE - offset);
398 kunmap_atomic(kaddr, KM_USER0);
404 trans = btrfs_join_transaction(root, 1);
406 btrfs_set_trans_block_group(trans, inode);
408 /* let's try to make an inline extent */
409 if (ret || total_in < (actual_end - start)) {
410 /* we didn't compress the entire range, try
411 * to make an uncompressed inline extent.
413 ret = cow_file_range_inline(trans, root, inode,
414 start, end, 0, NULL);
416 /* try making a compressed inline extent */
417 ret = cow_file_range_inline(trans, root, inode,
419 total_compressed, pages);
421 btrfs_end_transaction(trans, root);
424 * inline extent creation worked, we don't need
425 * to create any more async work items. Unlock
426 * and free up our temp pages.
428 extent_clear_unlock_delalloc(inode,
429 &BTRFS_I(inode)->io_tree,
430 start, end, NULL, 1, 0,
439 * we aren't doing an inline extent, so round the compressed size
440 * up to a block size boundary so the allocator does sane things
443 total_compressed = (total_compressed + blocksize - 1) &
447 * one last check to make sure the compression is really a
448 * win, compare the page count read with the blocks on disk
450 total_in = (total_in + PAGE_CACHE_SIZE - 1) &
451 ~(PAGE_CACHE_SIZE - 1);
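/*
 * example, with 4K blocks and pages: 32 pages (131072 bytes) of input that
 * only shrink to 130000 bytes still round up to 131072 on disk, so
 * total_compressed >= total_in and we fall back to uncompressed IO
 */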
452 if (total_compressed >= total_in) {
455 disk_num_bytes = total_compressed;
456 num_bytes = total_in;
459 if (!will_compress && pages) {
461 * the compression code ran but failed to make things smaller,
462 * free any pages it allocated and our page pointer array
464 for (i = 0; i < nr_pages_ret; i++) {
465 WARN_ON(pages[i]->mapping);
466 page_cache_release(pages[i]);
470 total_compressed = 0;
473 /* flag the file so we don't compress in the future */
474 btrfs_set_flag(inode, NOCOMPRESS);
479 /* the async work queues will take care of doing actual
480 * allocation on disk for these compressed pages,
481 * and will submit them to the elevator.
483 add_async_extent(async_cow, start, num_bytes,
484 total_compressed, pages, nr_pages_ret);
486 if (start + num_bytes < end && start + num_bytes < actual_end) {
493 cleanup_and_bail_uncompressed:
495 * No compression, but we still need to write the pages in
496 * the file we've been given so far. redirty the locked
497 * page if it corresponds to our extent and set things up
498 * for the async work queue to run cow_file_range to do
499 * the normal delalloc dance
501 if (page_offset(locked_page) >= start &&
502 page_offset(locked_page) <= end) {
503 __set_page_dirty_nobuffers(locked_page);
504 /* unlocked later on in the async handlers */
506 add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
514 for (i = 0; i < nr_pages_ret; i++) {
515 WARN_ON(pages[i]->mapping);
516 page_cache_release(pages[i]);
524 * phase two of compressed writeback. This is the ordered portion
525 * of the code, which only gets called in the order the work was
526 * queued. We walk all the async extents created by compress_file_range
527 * and send them down to the disk.
529 static noinline int submit_compressed_extents(struct inode *inode,
530 struct async_cow *async_cow)
532 struct async_extent *async_extent;
534 struct btrfs_trans_handle *trans;
535 struct btrfs_key ins;
536 struct extent_map *em;
537 struct btrfs_root *root = BTRFS_I(inode)->root;
538 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
539 struct extent_io_tree *io_tree;
542 if (list_empty(&async_cow->extents))
545 trans = btrfs_join_transaction(root, 1);
547 while (!list_empty(&async_cow->extents)) {
548 async_extent = list_entry(async_cow->extents.next,
549 struct async_extent, list);
550 list_del(&async_extent->list);
552 io_tree = &BTRFS_I(inode)->io_tree;
554 /* did the compression code fall back to uncompressed IO? */
555 if (!async_extent->pages) {
556 int page_started = 0;
557 unsigned long nr_written = 0;
559 lock_extent(io_tree, async_extent->start,
560 async_extent->start +
561 async_extent->ram_size - 1, GFP_NOFS);
563 /* allocate blocks */
564 cow_file_range(inode, async_cow->locked_page,
566 async_extent->start +
567 async_extent->ram_size - 1,
568 &page_started, &nr_written, 0);
571 * if page_started, cow_file_range inserted an
572 * inline extent and took care of all the unlocking
573 * and IO for us. Otherwise, we need to submit
574 * all those pages down to the drive.
577 extent_write_locked_range(io_tree,
578 inode, async_extent->start,
579 async_extent->start +
580 async_extent->ram_size - 1,
588 lock_extent(io_tree, async_extent->start,
589 async_extent->start + async_extent->ram_size - 1,
592 * here we're doing allocation and writeback of the
595 btrfs_drop_extent_cache(inode, async_extent->start,
596 async_extent->start +
597 async_extent->ram_size - 1, 0);
599 ret = btrfs_reserve_extent(trans, root,
600 async_extent->compressed_size,
601 async_extent->compressed_size,
605 em = alloc_extent_map(GFP_NOFS);
606 em->start = async_extent->start;
607 em->len = async_extent->ram_size;
608 em->orig_start = em->start;
610 em->block_start = ins.objectid;
611 em->block_len = ins.offset;
612 em->bdev = root->fs_info->fs_devices->latest_bdev;
613 set_bit(EXTENT_FLAG_PINNED, &em->flags);
614 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
617 spin_lock(&em_tree->lock);
618 ret = add_extent_mapping(em_tree, em);
619 spin_unlock(&em_tree->lock);
620 if (ret != -EEXIST) {
624 btrfs_drop_extent_cache(inode, async_extent->start,
625 async_extent->start +
626 async_extent->ram_size - 1, 0);
629 ret = btrfs_add_ordered_extent(inode, async_extent->start,
631 async_extent->ram_size,
633 BTRFS_ORDERED_COMPRESSED);
636 btrfs_end_transaction(trans, root);
639 * clear dirty, set writeback and unlock the pages.
641 extent_clear_unlock_delalloc(inode,
642 &BTRFS_I(inode)->io_tree,
644 async_extent->start +
645 async_extent->ram_size - 1,
646 NULL, 1, 1, 0, 1, 1, 0);
648 ret = btrfs_submit_compressed_write(inode,
650 async_extent->ram_size,
652 ins.offset, async_extent->pages,
653 async_extent->nr_pages);
656 trans = btrfs_join_transaction(root, 1);
657 alloc_hint = ins.objectid + ins.offset;
662 btrfs_end_transaction(trans, root);
667 * when extent_io.c finds a delayed allocation range in the file,
668 * the call backs end up in this code. The basic idea is to
669 * allocate extents on disk for the range, and create ordered data structs
670 * in ram to track those extents.
672 * locked_page is the page that writepage had locked already. We use
673 * it to make sure we don't do extra locks or unlocks.
675 * *page_started is set to one if we unlock locked_page and do everything
676 * required to start IO on it. It may be clean and already done with
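/*
 * Rough shape of the loop below: round the range up to the block size, then
 * repeatedly btrfs_reserve_extent() up to fs_info->max_extent bytes at a
 * time, insert a pinned extent_map for the new range, record an ordered
 * extent, and unlock/clean the delalloc pages, until disk_num_bytes has
 * been fully allocated.
 */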
679 static noinline int cow_file_range(struct inode *inode,
680 struct page *locked_page,
681 u64 start, u64 end, int *page_started,
682 unsigned long *nr_written,
685 struct btrfs_root *root = BTRFS_I(inode)->root;
686 struct btrfs_trans_handle *trans;
689 unsigned long ram_size;
692 u64 blocksize = root->sectorsize;
694 u64 isize = i_size_read(inode);
695 struct btrfs_key ins;
696 struct extent_map *em;
697 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
700 trans = btrfs_join_transaction(root, 1);
702 btrfs_set_trans_block_group(trans, inode);
704 actual_end = min_t(u64, isize, end + 1);
706 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
707 num_bytes = max(blocksize, num_bytes);
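/*
 * e.g. blocksize 4096, start 0, end 6000:
 * (6000 - 0 + 4096) & ~4095 == 8192, so two blocks are allocated
 */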
708 disk_num_bytes = num_bytes;
712 /* let's try to make an inline extent */
713 ret = cow_file_range_inline(trans, root, inode,
714 start, end, 0, NULL);
716 extent_clear_unlock_delalloc(inode,
717 &BTRFS_I(inode)->io_tree,
718 start, end, NULL, 1, 1,
720 *nr_written = *nr_written +
721 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
728 BUG_ON(disk_num_bytes >
729 btrfs_super_total_bytes(&root->fs_info->super_copy));
731 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
733 while (disk_num_bytes > 0) {
734 cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
735 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
736 root->sectorsize, 0, alloc_hint,
740 em = alloc_extent_map(GFP_NOFS);
742 em->orig_start = em->start;
744 ram_size = ins.offset;
745 em->len = ins.offset;
747 em->block_start = ins.objectid;
748 em->block_len = ins.offset;
749 em->bdev = root->fs_info->fs_devices->latest_bdev;
750 set_bit(EXTENT_FLAG_PINNED, &em->flags);
753 spin_lock(&em_tree->lock);
754 ret = add_extent_mapping(em_tree, em);
755 spin_unlock(&em_tree->lock);
756 if (ret != -EEXIST) {
760 btrfs_drop_extent_cache(inode, start,
761 start + ram_size - 1, 0);
764 cur_alloc_size = ins.offset;
765 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
766 ram_size, cur_alloc_size, 0);
769 if (root->root_key.objectid ==
770 BTRFS_DATA_RELOC_TREE_OBJECTID) {
771 ret = btrfs_reloc_clone_csums(inode, start,
776 if (disk_num_bytes < cur_alloc_size)
779 /* we're not doing compressed IO, don't unlock the first
780 * page (which the caller expects to stay locked), don't
781 * clear any dirty bits and don't set any writeback bits
783 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
784 start, start + ram_size - 1,
785 locked_page, unlock, 1,
787 disk_num_bytes -= cur_alloc_size;
788 num_bytes -= cur_alloc_size;
789 alloc_hint = ins.objectid + ins.offset;
790 start += cur_alloc_size;
794 btrfs_end_transaction(trans, root);
800 * work queue callback to start compression on a file and pages
802 static noinline void async_cow_start(struct btrfs_work *work)
804 struct async_cow *async_cow;
806 async_cow = container_of(work, struct async_cow, work);
808 compress_file_range(async_cow->inode, async_cow->locked_page,
809 async_cow->start, async_cow->end, async_cow,
812 async_cow->inode = NULL;
816 * work queue callback to submit previously compressed pages
818 static noinline void async_cow_submit(struct btrfs_work *work)
820 struct async_cow *async_cow;
821 struct btrfs_root *root;
822 unsigned long nr_pages;
824 async_cow = container_of(work, struct async_cow, work);
826 root = async_cow->root;
827 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
830 atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
832 if (atomic_read(&root->fs_info->async_delalloc_pages) <
834 waitqueue_active(&root->fs_info->async_submit_wait))
835 wake_up(&root->fs_info->async_submit_wait);
837 if (async_cow->inode)
838 submit_compressed_extents(async_cow->inode, async_cow);
841 static noinline void async_cow_free(struct btrfs_work *work)
843 struct async_cow *async_cow;
844 async_cow = container_of(work, struct async_cow, work);
848 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
849 u64 start, u64 end, int *page_started,
850 unsigned long *nr_written)
852 struct async_cow *async_cow;
853 struct btrfs_root *root = BTRFS_I(inode)->root;
854 unsigned long nr_pages;
856 int limit = 10 * 1024 * 1024;
858 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
859 EXTENT_DELALLOC, 1, 0, GFP_NOFS);
860 while (start < end) {
861 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
862 async_cow->inode = inode;
863 async_cow->root = root;
864 async_cow->locked_page = locked_page;
865 async_cow->start = start;
867 if (btrfs_test_flag(inode, NOCOMPRESS))
870 cur_end = min(end, start + 512 * 1024 - 1);
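/*
 * when compressing, each async_cow covers at most 512K, so e.g. a 2MB
 * delalloc range is queued as four separate work items
 */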
872 async_cow->end = cur_end;
873 INIT_LIST_HEAD(&async_cow->extents);
875 async_cow->work.func = async_cow_start;
876 async_cow->work.ordered_func = async_cow_submit;
877 async_cow->work.ordered_free = async_cow_free;
878 async_cow->work.flags = 0;
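/*
 * roughly: btrfs_queue_worker runs work.func (async_cow_start) as soon as a
 * worker thread is free; ordered_func (async_cow_submit) and ordered_free
 * are only called once every earlier entry on the queue has reached that
 * stage, which preserves the write ordering described above.
 */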
880 nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
882 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
884 btrfs_queue_worker(&root->fs_info->delalloc_workers,
887 if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
888 wait_event(root->fs_info->async_submit_wait,
889 (atomic_read(&root->fs_info->async_delalloc_pages) <
893 while (atomic_read(&root->fs_info->async_submit_draining) &&
894 atomic_read(&root->fs_info->async_delalloc_pages)) {
895 wait_event(root->fs_info->async_submit_wait,
896 (atomic_read(&root->fs_info->async_delalloc_pages) ==
900 *nr_written += nr_pages;
907 static noinline int csum_exist_in_range(struct btrfs_root *root,
908 u64 bytenr, u64 num_bytes)
911 struct btrfs_ordered_sum *sums;
914 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
915 bytenr + num_bytes - 1, &list);
916 if (ret == 0 && list_empty(&list))
919 while (!list_empty(&list)) {
920 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
921 list_del(&sums->list);
928 * the nocow writeback callback. This checks for snapshots or COW copies
929 * of the extents that exist in the file, and COWs the file as required.
931 * If no cow copies or snapshots exist, we write directly to the existing
934 static noinline int run_delalloc_nocow(struct inode *inode,
935 struct page *locked_page,
936 u64 start, u64 end, int *page_started, int force,
937 unsigned long *nr_written)
939 struct btrfs_root *root = BTRFS_I(inode)->root;
940 struct btrfs_trans_handle *trans;
941 struct extent_buffer *leaf;
942 struct btrfs_path *path;
943 struct btrfs_file_extent_item *fi;
944 struct btrfs_key found_key;
956 path = btrfs_alloc_path();
958 trans = btrfs_join_transaction(root, 1);
964 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
967 if (ret > 0 && path->slots[0] > 0 && check_prev) {
968 leaf = path->nodes[0];
969 btrfs_item_key_to_cpu(leaf, &found_key,
971 if (found_key.objectid == inode->i_ino &&
972 found_key.type == BTRFS_EXTENT_DATA_KEY)
977 leaf = path->nodes[0];
978 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
979 ret = btrfs_next_leaf(root, path);
984 leaf = path->nodes[0];
990 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
992 if (found_key.objectid > inode->i_ino ||
993 found_key.type > BTRFS_EXTENT_DATA_KEY ||
994 found_key.offset > end)
997 if (found_key.offset > cur_offset) {
998 extent_end = found_key.offset;
1002 fi = btrfs_item_ptr(leaf, path->slots[0],
1003 struct btrfs_file_extent_item);
1004 extent_type = btrfs_file_extent_type(leaf, fi);
1006 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1007 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1008 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1009 extent_end = found_key.offset +
1010 btrfs_file_extent_num_bytes(leaf, fi);
1011 if (extent_end <= start) {
1015 if (disk_bytenr == 0)
1017 if (btrfs_file_extent_compression(leaf, fi) ||
1018 btrfs_file_extent_encryption(leaf, fi) ||
1019 btrfs_file_extent_other_encoding(leaf, fi))
1021 if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1023 if (btrfs_extent_readonly(root, disk_bytenr))
1025 if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
1028 disk_bytenr += btrfs_file_extent_offset(leaf, fi);
1029 disk_bytenr += cur_offset - found_key.offset;
1030 num_bytes = min(end + 1, extent_end) - cur_offset;
1032 * force cow if csum exists in the range.
1033 * this ensures that the csums for a given extent are
1034 * either valid or do not exist.
1036 if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1039 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1040 extent_end = found_key.offset +
1041 btrfs_file_extent_inline_len(leaf, fi);
1042 extent_end = ALIGN(extent_end, root->sectorsize);
1047 if (extent_end <= start) {
1052 if (cow_start == (u64)-1)
1053 cow_start = cur_offset;
1054 cur_offset = extent_end;
1055 if (cur_offset > end)
1061 btrfs_release_path(root, path);
1062 if (cow_start != (u64)-1) {
1063 ret = cow_file_range(inode, locked_page, cow_start,
1064 found_key.offset - 1, page_started,
1067 cow_start = (u64)-1;
1070 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1071 struct extent_map *em;
1072 struct extent_map_tree *em_tree;
1073 em_tree = &BTRFS_I(inode)->extent_tree;
1074 em = alloc_extent_map(GFP_NOFS);
1075 em->start = cur_offset;
1076 em->orig_start = em->start;
1077 em->len = num_bytes;
1078 em->block_len = num_bytes;
1079 em->block_start = disk_bytenr;
1080 em->bdev = root->fs_info->fs_devices->latest_bdev;
1081 set_bit(EXTENT_FLAG_PINNED, &em->flags);
1083 spin_lock(&em_tree->lock);
1084 ret = add_extent_mapping(em_tree, em);
1085 spin_unlock(&em_tree->lock);
1086 if (ret != -EEXIST) {
1087 free_extent_map(em);
1090 btrfs_drop_extent_cache(inode, em->start,
1091 em->start + em->len - 1, 0);
1093 type = BTRFS_ORDERED_PREALLOC;
1095 type = BTRFS_ORDERED_NOCOW;
1098 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1099 num_bytes, num_bytes, type);
1102 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1103 cur_offset, cur_offset + num_bytes - 1,
1104 locked_page, 1, 1, 1, 0, 0, 0);
1105 cur_offset = extent_end;
1106 if (cur_offset > end)
1109 btrfs_release_path(root, path);
1111 if (cur_offset <= end && cow_start == (u64)-1)
1112 cow_start = cur_offset;
1113 if (cow_start != (u64)-1) {
1114 ret = cow_file_range(inode, locked_page, cow_start, end,
1115 page_started, nr_written, 1);
1119 ret = btrfs_end_transaction(trans, root);
1121 btrfs_free_path(path);
1126 * extent_io.c call back to do delayed allocation processing
1128 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1129 u64 start, u64 end, int *page_started,
1130 unsigned long *nr_written)
1133 struct btrfs_root *root = BTRFS_I(inode)->root;
1135 if (btrfs_test_flag(inode, NODATACOW))
1136 ret = run_delalloc_nocow(inode, locked_page, start, end,
1137 page_started, 1, nr_written);
1138 else if (btrfs_test_flag(inode, PREALLOC))
1139 ret = run_delalloc_nocow(inode, locked_page, start, end,
1140 page_started, 0, nr_written);
1141 else if (!btrfs_test_opt(root, COMPRESS))
1142 ret = cow_file_range(inode, locked_page, start, end,
1143 page_started, nr_written, 1);
1145 ret = cow_file_range_async(inode, locked_page, start, end,
1146 page_started, nr_written);
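/*
 * summary of the dispatch above: NODATACOW inodes force the nocow path,
 * PREALLOC inodes take the nocow path without forcing, plain inodes on a
 * filesystem mounted without compression use cow_file_range() directly,
 * and everything else goes through the async compression path.
 */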
1151 * extent_io.c set_bit_hook, used to track delayed allocation
1152 * bytes in this file, and to maintain the list of inodes that
1153 * have pending delalloc work to be done.
1155 static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1156 unsigned long old, unsigned long bits)
1159 * set_bit and clear_bit hooks normally require _irqsave/restore
1160 * but in this case, we are only testing for the DELALLOC
1161 * bit, which is only set or cleared with irqs on
1163 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1164 struct btrfs_root *root = BTRFS_I(inode)->root;
1165 btrfs_delalloc_reserve_space(root, inode, end - start + 1);
1166 spin_lock(&root->fs_info->delalloc_lock);
1167 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
1168 root->fs_info->delalloc_bytes += end - start + 1;
1169 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1170 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1171 &root->fs_info->delalloc_inodes);
1173 spin_unlock(&root->fs_info->delalloc_lock);
1179 * extent_io.c clear_bit_hook, see set_bit_hook for why
1181 static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
1182 unsigned long old, unsigned long bits)
1185 * set_bit and clear_bit hooks normally require _irqsave/restore
1186 * but in this case, we are only testing for the DELALLOC
1187 * bit, which is only set or cleared with irqs on
1189 if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1190 struct btrfs_root *root = BTRFS_I(inode)->root;
1192 spin_lock(&root->fs_info->delalloc_lock);
1193 if (end - start + 1 > root->fs_info->delalloc_bytes) {
1194 printk(KERN_INFO "btrfs warning: delalloc account "
1196 (unsigned long long)end - start + 1,
1197 (unsigned long long)
1198 root->fs_info->delalloc_bytes);
1199 btrfs_delalloc_free_space(root, inode, (u64)-1);
1200 root->fs_info->delalloc_bytes = 0;
1201 BTRFS_I(inode)->delalloc_bytes = 0;
1203 btrfs_delalloc_free_space(root, inode,
1205 root->fs_info->delalloc_bytes -= end - start + 1;
1206 BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
1208 if (BTRFS_I(inode)->delalloc_bytes == 0 &&
1209 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1210 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1212 spin_unlock(&root->fs_info->delalloc_lock);
1218 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1219 * we don't create bios that span stripes or chunks
1221 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1222 size_t size, struct bio *bio,
1223 unsigned long bio_flags)
1225 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1226 struct btrfs_mapping_tree *map_tree;
1227 u64 logical = (u64)bio->bi_sector << 9;
1232 if (bio_flags & EXTENT_BIO_COMPRESSED)
1235 length = bio->bi_size;
1236 map_tree = &root->fs_info->mapping_tree;
1237 map_length = length;
1238 ret = btrfs_map_block(map_tree, READ, logical,
1239 &map_length, NULL, 0);
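/*
 * btrfs_map_block() trims map_length down to the number of bytes that stay
 * inside one stripe/chunk at 'logical'; if the bio plus this new page would
 * spill past that, the nonzero return below tells the caller to start a new
 * bio instead of merging.
 */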
1241 if (map_length < length + size)
1247 * in order to insert checksums into the metadata in large chunks,
1248 * we wait until bio submission time. All the pages in the bio are
1249 * checksummed and sums are attached onto the ordered extent record.
1251 * At IO completion time the csums attached to the ordered extent record
1252 * are inserted into the btree
1254 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1255 struct bio *bio, int mirror_num,
1256 unsigned long bio_flags)
1258 struct btrfs_root *root = BTRFS_I(inode)->root;
1261 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1267 * in order to insert checksums into the metadata in large chunks,
1268 * we wait until bio submission time. All the pages in the bio are
1269 * checksummed and sums are attached onto the ordered extent record.
1271 * At IO completion time the csums attached to the ordered extent record
1272 * are inserted into the btree
1274 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1275 int mirror_num, unsigned long bio_flags)
1277 struct btrfs_root *root = BTRFS_I(inode)->root;
1278 return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1282 * extent_io.c submission hook. This does the right thing for csum calculation
1283 * on write, or reading the csums from the tree before a read
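/*
 * In short: reads of compressed extents detour through
 * btrfs_submit_compressed_read(); ordinary reads look up their csums with
 * btrfs_lookup_bio_sums() before being mapped; writes (unless NODATASUM or
 * the data reloc tree) go through btrfs_wq_submit_bio() so csums are
 * generated asynchronously at submit time; everything else ends in
 * btrfs_map_bio().
 */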
1285 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1286 int mirror_num, unsigned long bio_flags)
1288 struct btrfs_root *root = BTRFS_I(inode)->root;
1292 skip_sum = btrfs_test_flag(inode, NODATASUM);
1294 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1297 if (!(rw & (1 << BIO_RW))) {
1298 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1299 return btrfs_submit_compressed_read(inode, bio,
1300 mirror_num, bio_flags);
1301 } else if (!skip_sum)
1302 btrfs_lookup_bio_sums(root, inode, bio, NULL);
1304 } else if (!skip_sum) {
1305 /* csum items have already been cloned */
1306 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1308 /* we're doing a write, do the async checksumming */
1309 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1310 inode, rw, bio, mirror_num,
1311 bio_flags, __btrfs_submit_bio_start,
1312 __btrfs_submit_bio_done);
1316 return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1320 * given a list of ordered sums record them in the inode. This happens
1321 * at IO completion time based on sums calculated at bio submission time.
1323 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1324 struct inode *inode, u64 file_offset,
1325 struct list_head *list)
1327 struct btrfs_ordered_sum *sum;
1329 btrfs_set_trans_block_group(trans, inode);
1331 list_for_each_entry(sum, list, list) {
1332 btrfs_csum_file_blocks(trans,
1333 BTRFS_I(inode)->root->fs_info->csum_root, sum);
1338 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
1340 if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1342 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1346 /* see btrfs_writepage_start_hook for details on why this is required */
1347 struct btrfs_writepage_fixup {
1349 struct btrfs_work work;
1352 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1354 struct btrfs_writepage_fixup *fixup;
1355 struct btrfs_ordered_extent *ordered;
1357 struct inode *inode;
1361 fixup = container_of(work, struct btrfs_writepage_fixup, work);
1365 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1366 ClearPageChecked(page);
1370 inode = page->mapping->host;
1371 page_start = page_offset(page);
1372 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1374 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1376 /* already ordered? We're done */
1377 if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
1378 EXTENT_ORDERED, 0)) {
1382 ordered = btrfs_lookup_ordered_extent(inode, page_start);
1384 unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
1385 page_end, GFP_NOFS);
1387 btrfs_start_ordered_extent(inode, ordered, 1);
1391 btrfs_set_extent_delalloc(inode, page_start, page_end);
1392 ClearPageChecked(page);
1394 unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1397 page_cache_release(page);
1401 * There are a few paths in the higher layers of the kernel that directly
1402 * set the page dirty bit without asking the filesystem if it is a
1403 * good idea. This causes problems because we want to make sure COW
1404 * properly happens and the data=ordered rules are followed.
1406 * In our case any range that doesn't have the ORDERED bit set
1407 * hasn't been properly setup for IO. We kick off an async process
1408 * to fix it up. The async helper will wait for ordered extents, set
1409 * the delalloc bit and make it safe to write the page.
1411 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1413 struct inode *inode = page->mapping->host;
1414 struct btrfs_writepage_fixup *fixup;
1415 struct btrfs_root *root = BTRFS_I(inode)->root;
1418 ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1423 if (PageChecked(page))
1426 fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1430 SetPageChecked(page);
1431 page_cache_get(page);
1432 fixup->work.func = btrfs_writepage_fixup_worker;
1434 btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1438 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1439 struct inode *inode, u64 file_pos,
1440 u64 disk_bytenr, u64 disk_num_bytes,
1441 u64 num_bytes, u64 ram_bytes,
1443 u8 compression, u8 encryption,
1444 u16 other_encoding, int extent_type)
1446 struct btrfs_root *root = BTRFS_I(inode)->root;
1447 struct btrfs_file_extent_item *fi;
1448 struct btrfs_path *path;
1449 struct extent_buffer *leaf;
1450 struct btrfs_key ins;
1454 path = btrfs_alloc_path();
1457 path->leave_spinning = 1;
1458 ret = btrfs_drop_extents(trans, root, inode, file_pos,
1459 file_pos + num_bytes, locked_end,
1463 ins.objectid = inode->i_ino;
1464 ins.offset = file_pos;
1465 ins.type = BTRFS_EXTENT_DATA_KEY;
1466 ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1468 leaf = path->nodes[0];
1469 fi = btrfs_item_ptr(leaf, path->slots[0],
1470 struct btrfs_file_extent_item);
1471 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1472 btrfs_set_file_extent_type(leaf, fi, extent_type);
1473 btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1474 btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1475 btrfs_set_file_extent_offset(leaf, fi, 0);
1476 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1477 btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1478 btrfs_set_file_extent_compression(leaf, fi, compression);
1479 btrfs_set_file_extent_encryption(leaf, fi, encryption);
1480 btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1482 btrfs_unlock_up_safe(path, 1);
1483 btrfs_set_lock_blocking(leaf);
1485 btrfs_mark_buffer_dirty(leaf);
1487 inode_add_bytes(inode, num_bytes);
1488 btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);
1490 ins.objectid = disk_bytenr;
1491 ins.offset = disk_num_bytes;
1492 ins.type = BTRFS_EXTENT_ITEM_KEY;
1493 ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
1494 root->root_key.objectid,
1495 trans->transid, inode->i_ino, &ins);
1497 btrfs_free_path(path);
1503 * helper function for btrfs_finish_ordered_io, this
1504 * just reads in some of the csum leaves to prime them into ram
1505 * before we start the transaction. It limits the amount of btree
1506 * reads required while inside the transaction.
1508 static noinline void reada_csum(struct btrfs_root *root,
1509 struct btrfs_path *path,
1510 struct btrfs_ordered_extent *ordered_extent)
1512 struct btrfs_ordered_sum *sum;
1515 sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
1517 bytenr = sum->sums[0].bytenr;
1520 * we don't care about the results, the point of this search is
1521 * just to get the btree leaves into ram
1523 btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
1526 /* as ordered data IO finishes, this gets called so we can finish
1527 * an ordered extent if the range of bytes in the file it covers are
1530 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1532 struct btrfs_root *root = BTRFS_I(inode)->root;
1533 struct btrfs_trans_handle *trans;
1534 struct btrfs_ordered_extent *ordered_extent = NULL;
1535 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1536 struct btrfs_path *path;
1540 ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
1545 * before we join the transaction, try to do some of our IO.
1546 * This will limit the amount of IO that we have to do with
1547 * the transaction running. We're unlikely to need to do any
1548 * IO if the file extents are new, the disk_i_size checks
1549 * covers the most common case.
1551 if (start < BTRFS_I(inode)->disk_i_size) {
1552 path = btrfs_alloc_path();
1554 ret = btrfs_lookup_file_extent(NULL, root, path,
1557 ordered_extent = btrfs_lookup_ordered_extent(inode,
1559 if (!list_empty(&ordered_extent->list)) {
1560 btrfs_release_path(root, path);
1561 reada_csum(root, path, ordered_extent);
1563 btrfs_free_path(path);
1567 trans = btrfs_join_transaction(root, 1);
1569 if (!ordered_extent)
1570 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1571 BUG_ON(!ordered_extent);
1572 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
1575 lock_extent(io_tree, ordered_extent->file_offset,
1576 ordered_extent->file_offset + ordered_extent->len - 1,
1579 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1581 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1583 ret = btrfs_mark_extent_written(trans, root, inode,
1584 ordered_extent->file_offset,
1585 ordered_extent->file_offset +
1586 ordered_extent->len);
1589 ret = insert_reserved_file_extent(trans, inode,
1590 ordered_extent->file_offset,
1591 ordered_extent->start,
1592 ordered_extent->disk_len,
1593 ordered_extent->len,
1594 ordered_extent->len,
1595 ordered_extent->file_offset +
1596 ordered_extent->len,
1598 BTRFS_FILE_EXTENT_REG);
1601 unlock_extent(io_tree, ordered_extent->file_offset,
1602 ordered_extent->file_offset + ordered_extent->len - 1,
1605 add_pending_csums(trans, inode, ordered_extent->file_offset,
1606 &ordered_extent->list);
1608 mutex_lock(&BTRFS_I(inode)->extent_mutex);
1609 btrfs_ordered_update_i_size(inode, ordered_extent);
1610 btrfs_update_inode(trans, root, inode);
1611 btrfs_remove_ordered_extent(inode, ordered_extent);
1612 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
1615 btrfs_put_ordered_extent(ordered_extent);
1616 /* once for the tree */
1617 btrfs_put_ordered_extent(ordered_extent);
1619 btrfs_end_transaction(trans, root);
1623 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1624 struct extent_state *state, int uptodate)
1626 return btrfs_finish_ordered_io(page->mapping->host, start, end);
1630 * When IO fails, either with EIO or csum verification fails, we
1631 * try other mirrors that might have a good copy of the data. This
1632 * io_failure_record is used to record state as we go through all the
1633 * mirrors. If another mirror has good data, the page is set up to date
1634 * and things continue. If a good mirror can't be found, the original
1635 * bio end_io callback is called to indicate things have failed.
1637 struct io_failure_record {
1642 unsigned long bio_flags;
1646 static int btrfs_io_failed_hook(struct bio *failed_bio,
1647 struct page *page, u64 start, u64 end,
1648 struct extent_state *state)
1650 struct io_failure_record *failrec = NULL;
1652 struct extent_map *em;
1653 struct inode *inode = page->mapping->host;
1654 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1655 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1662 ret = get_state_private(failure_tree, start, &private);
1664 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1667 failrec->start = start;
1668 failrec->len = end - start + 1;
1669 failrec->last_mirror = 0;
1670 failrec->bio_flags = 0;
1672 spin_lock(&em_tree->lock);
1673 em = lookup_extent_mapping(em_tree, start, failrec->len);
1674 if (em->start > start || em->start + em->len < start) {
1675 free_extent_map(em);
1678 spin_unlock(&em_tree->lock);
1680 if (!em || IS_ERR(em)) {
1684 logical = start - em->start;
1685 logical = em->block_start + logical;
1686 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1687 logical = em->block_start;
1688 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
1690 failrec->logical = logical;
1691 free_extent_map(em);
1692 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1693 EXTENT_DIRTY, GFP_NOFS);
1694 set_state_private(failure_tree, start,
1695 (u64)(unsigned long)failrec);
1697 failrec = (struct io_failure_record *)(unsigned long)private;
1699 num_copies = btrfs_num_copies(
1700 &BTRFS_I(inode)->root->fs_info->mapping_tree,
1701 failrec->logical, failrec->len);
1702 failrec->last_mirror++;
1704 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1705 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1708 if (state && state->start != failrec->start)
1710 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1712 if (!state || failrec->last_mirror > num_copies) {
1713 set_state_private(failure_tree, failrec->start, 0);
1714 clear_extent_bits(failure_tree, failrec->start,
1715 failrec->start + failrec->len - 1,
1716 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1720 bio = bio_alloc(GFP_NOFS, 1);
1721 bio->bi_private = state;
1722 bio->bi_end_io = failed_bio->bi_end_io;
1723 bio->bi_sector = failrec->logical >> 9;
1724 bio->bi_bdev = failed_bio->bi_bdev;
1727 bio_add_page(bio, page, failrec->len, start - page_offset(page));
1728 if (failed_bio->bi_rw & (1 << BIO_RW))
1733 BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1734 failrec->last_mirror,
1735 failrec->bio_flags);
1740 * each time an IO finishes, we do a fast check in the IO failure tree
1741 * to see if we need to process or clean up an io_failure_record
1743 static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1746 u64 private_failure;
1747 struct io_failure_record *failure;
1751 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1752 (u64)-1, 1, EXTENT_DIRTY)) {
1753 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1754 start, &private_failure);
1756 failure = (struct io_failure_record *)(unsigned long)
1758 set_state_private(&BTRFS_I(inode)->io_failure_tree,
1760 clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1762 failure->start + failure->len - 1,
1763 EXTENT_DIRTY | EXTENT_LOCKED,
1772 * when reads are done, we need to check csums to verify the data is correct
1773 * if there's a match, we allow the bio to finish. If not, we go through
1774 * the io_failure_record routines to find good copies
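/*
 * The expected csum for this block was stashed in the io_tree 'private'
 * field when the read was submitted. Below we recompute the csum over the
 * page contents with btrfs_csum_data()/btrfs_csum_final() and compare; on a
 * mismatch we log the bad csum, poison the buffer and return an error so
 * the io failure hooks above can retry the other mirrors.
 */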
1776 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1777 struct extent_state *state)
1779 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1780 struct inode *inode = page->mapping->host;
1781 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1783 u64 private = ~(u32)0;
1785 struct btrfs_root *root = BTRFS_I(inode)->root;
1788 if (PageChecked(page)) {
1789 ClearPageChecked(page);
1792 if (btrfs_test_flag(inode, NODATASUM))
1795 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1796 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
1797 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1802 if (state && state->start == start) {
1803 private = state->private;
1806 ret = get_state_private(io_tree, start, &private);
1808 kaddr = kmap_atomic(page, KM_USER0);
1812 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
1813 btrfs_csum_final(csum, (char *)&csum);
1814 if (csum != private)
1817 kunmap_atomic(kaddr, KM_USER0);
1819 /* if the io failure tree for this inode is non-empty,
1820 * check to see if we've recovered from a failed IO
1822 btrfs_clean_io_failures(inode, start);
1826 if (printk_ratelimit()) {
1827 printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
1828 "private %llu\n", page->mapping->host->i_ino,
1829 (unsigned long long)start, csum,
1830 (unsigned long long)private);
1832 memset(kaddr + offset, 1, end - start + 1);
1833 flush_dcache_page(page);
1834 kunmap_atomic(kaddr, KM_USER0);
1841 * This creates an orphan entry for the given inode in case something goes
1842 * wrong in the middle of an unlink/truncate.
1844 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
1846 struct btrfs_root *root = BTRFS_I(inode)->root;
1849 spin_lock(&root->list_lock);
1851 /* already on the orphan list, we're good */
1852 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
1853 spin_unlock(&root->list_lock);
1857 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1859 spin_unlock(&root->list_lock);
1862 * insert an orphan item to track this unlinked/truncated file
1864 ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
1870 * We have done the truncate/delete so we can go ahead and remove the orphan
1871 * item for this particular inode.
1873 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
1875 struct btrfs_root *root = BTRFS_I(inode)->root;
1878 spin_lock(&root->list_lock);
1880 if (list_empty(&BTRFS_I(inode)->i_orphan)) {
1881 spin_unlock(&root->list_lock);
1885 list_del_init(&BTRFS_I(inode)->i_orphan);
1887 spin_unlock(&root->list_lock);
1891 spin_unlock(&root->list_lock);
1893 ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
1899 * this cleans up any orphans that may be left on the list from the last use
1902 void btrfs_orphan_cleanup(struct btrfs_root *root)
1904 struct btrfs_path *path;
1905 struct extent_buffer *leaf;
1906 struct btrfs_item *item;
1907 struct btrfs_key key, found_key;
1908 struct btrfs_trans_handle *trans;
1909 struct inode *inode;
1910 int ret = 0, nr_unlink = 0, nr_truncate = 0;
1912 path = btrfs_alloc_path();
1917 key.objectid = BTRFS_ORPHAN_OBJECTID;
1918 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1919 key.offset = (u64)-1;
1923 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1925 printk(KERN_ERR "Error searching slot for orphan: %d"
1931 * if ret == 0 means we found what we were searching for, which
1932 * is weird, but possible, so only screw with path if we didn't
1933 * find the key and see if we have stuff that matches
1936 if (path->slots[0] == 0)
1941 /* pull out the item */
1942 leaf = path->nodes[0];
1943 item = btrfs_item_nr(leaf, path->slots[0]);
1944 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1946 /* make sure the item matches what we want */
1947 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
1949 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
1952 /* release the path since we're done with it */
1953 btrfs_release_path(root, path);
1956 * this is basically btrfs_lookup, minus the cross-root
1957 * handling. we store the inode number in the
1958 * offset of the orphan item.
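 * (so the key for each orphan is:
 *    objectid = BTRFS_ORPHAN_OBJECTID,
 *    type     = BTRFS_ORPHAN_ITEM_KEY,
 *    offset   = the orphaned inode's inode number,
 *  exactly the shape of the search key built above)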
1960 inode = btrfs_iget_locked(root->fs_info->sb,
1961 found_key.offset, root);
1965 if (inode->i_state & I_NEW) {
1966 BTRFS_I(inode)->root = root;
1968 /* have to set the location manually */
1969 BTRFS_I(inode)->location.objectid = inode->i_ino;
1970 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
1971 BTRFS_I(inode)->location.offset = 0;
1973 btrfs_read_locked_inode(inode);
1974 unlock_new_inode(inode);
1978 * add this inode to the orphan list so btrfs_orphan_del does
1979 * the proper thing when we hit it
1981 spin_lock(&root->list_lock);
1982 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1983 spin_unlock(&root->list_lock);
1986 * if this is a bad inode, means we actually succeeded in
1987 * removing the inode, but not the orphan record, which means
1988 * we need to manually delete the orphan since iput will just
1989 * do a destroy_inode
1991 if (is_bad_inode(inode)) {
1992 trans = btrfs_start_transaction(root, 1);
1993 btrfs_orphan_del(trans, inode);
1994 btrfs_end_transaction(trans, root);
1999 /* if we have links, this was a truncate, lets do that */
2000 if (inode->i_nlink) {
2002 btrfs_truncate(inode);
2007 /* this will do delete_inode and everything for us */
2012 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2014 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2016 btrfs_free_path(path);
2020 * read an inode from the btree into the in-memory inode
2022 void btrfs_read_locked_inode(struct inode *inode)
2024 struct btrfs_path *path;
2025 struct extent_buffer *leaf;
2026 struct btrfs_inode_item *inode_item;
2027 struct btrfs_timespec *tspec;
2028 struct btrfs_root *root = BTRFS_I(inode)->root;
2029 struct btrfs_key location;
2030 u64 alloc_group_block;
2034 path = btrfs_alloc_path();
2036 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2038 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2042 leaf = path->nodes[0];
2043 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2044 struct btrfs_inode_item);
2046 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2047 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2048 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2049 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2050 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2052 tspec = btrfs_inode_atime(inode_item);
2053 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2054 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2056 tspec = btrfs_inode_mtime(inode_item);
2057 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2058 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2060 tspec = btrfs_inode_ctime(inode_item);
2061 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2062 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2064 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2065 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2066 BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2067 inode->i_generation = BTRFS_I(inode)->generation;
2069 rdev = btrfs_inode_rdev(leaf, inode_item);
2071 BTRFS_I(inode)->index_cnt = (u64)-1;
2072 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2074 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2076 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2077 alloc_group_block, 0);
2078 btrfs_free_path(path);
2081 switch (inode->i_mode & S_IFMT) {
2083 inode->i_mapping->a_ops = &btrfs_aops;
2084 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2085 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2086 inode->i_fop = &btrfs_file_operations;
2087 inode->i_op = &btrfs_file_inode_operations;
2090 inode->i_fop = &btrfs_dir_file_operations;
2091 if (root == root->fs_info->tree_root)
2092 inode->i_op = &btrfs_dir_ro_inode_operations;
2094 inode->i_op = &btrfs_dir_inode_operations;
2097 inode->i_op = &btrfs_symlink_inode_operations;
2098 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2099 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2102 inode->i_op = &btrfs_special_inode_operations;
2103 init_special_inode(inode, inode->i_mode, rdev);
2109 btrfs_free_path(path);
2110 make_bad_inode(inode);
2114 * given a leaf and an inode, copy the inode fields into the leaf
2116 static void fill_inode_item(struct btrfs_trans_handle *trans,
2117 struct extent_buffer *leaf,
2118 struct btrfs_inode_item *item,
2119 struct inode *inode)
2121 btrfs_set_inode_uid(leaf, item, inode->i_uid);
2122 btrfs_set_inode_gid(leaf, item, inode->i_gid);
2123 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2124 btrfs_set_inode_mode(leaf, item, inode->i_mode);
2125 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2127 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2128 inode->i_atime.tv_sec);
2129 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2130 inode->i_atime.tv_nsec);
2132 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2133 inode->i_mtime.tv_sec);
2134 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2135 inode->i_mtime.tv_nsec);
2137 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2138 inode->i_ctime.tv_sec);
2139 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2140 inode->i_ctime.tv_nsec);
2142 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2143 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2144 btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2145 btrfs_set_inode_transid(leaf, item, trans->transid);
2146 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2147 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2148 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2152 * copy everything in the in-memory inode into the btree.
2154 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2155 struct btrfs_root *root, struct inode *inode)
2157 struct btrfs_inode_item *inode_item;
2158 struct btrfs_path *path;
2159 struct extent_buffer *leaf;
2162 path = btrfs_alloc_path();
2164 path->leave_spinning = 1;
2165 ret = btrfs_lookup_inode(trans, root, path,
2166 &BTRFS_I(inode)->location, 1);
2173 btrfs_unlock_up_safe(path, 1);
2174 leaf = path->nodes[0];
2175 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2176 struct btrfs_inode_item);
2178 fill_inode_item(trans, leaf, inode_item, inode);
2179 btrfs_mark_buffer_dirty(leaf);
2180 btrfs_set_inode_last_trans(trans, inode);
2183 btrfs_free_path(path);
2189 * unlink helper that gets used here in inode.c and in the tree logging
2190 * recovery code. It removes a link in a directory with a given name, and
2191 * also drops the back refs in the inode to the directory
2193 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2194 struct btrfs_root *root,
2195 struct inode *dir, struct inode *inode,
2196 const char *name, int name_len)
2198 struct btrfs_path *path;
2200 struct extent_buffer *leaf;
2201 struct btrfs_dir_item *di;
2202 struct btrfs_key key;
2205 path = btrfs_alloc_path();
2211 path->leave_spinning = 1;
2212 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2213 name, name_len, -1);
2222 leaf = path->nodes[0];
2223 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2224 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2227 btrfs_release_path(root, path);
2229 ret = btrfs_del_inode_ref(trans, root, name, name_len,
2231 dir->i_ino, &index);
2233 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2234 "inode %lu parent %lu\n", name_len, name,
2235 inode->i_ino, dir->i_ino);
2239 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2240 index, name, name_len, -1);
2249 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2250 btrfs_release_path(root, path);
2252 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2254 BUG_ON(ret != 0 && ret != -ENOENT);
2256 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2260 btrfs_free_path(path);
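/* btrfs counts a directory's i_size as the sum of the name lengths of its
 * entries, once for the DIR_ITEM copy and once for the DIR_INDEX copy,
 * hence the name_len * 2 below */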
2264 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2265 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2266 btrfs_update_inode(trans, root, dir);
2267 btrfs_drop_nlink(inode);
2268 ret = btrfs_update_inode(trans, root, inode);
2269 dir->i_sb->s_dirt = 1;
2274 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2276 struct btrfs_root *root;
2277 struct btrfs_trans_handle *trans;
2278 struct inode *inode = dentry->d_inode;
2280 unsigned long nr = 0;
2282 root = BTRFS_I(dir)->root;
2284 trans = btrfs_start_transaction(root, 1);
2286 btrfs_set_trans_block_group(trans, dir);
2288 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2290 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2291 dentry->d_name.name, dentry->d_name.len);
2293 if (inode->i_nlink == 0)
2294 ret = btrfs_orphan_add(trans, inode);
2296 nr = trans->blocks_used;
2298 btrfs_end_transaction_throttle(trans, root);
2299 btrfs_btree_balance_dirty(root, nr);
2303 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2305 struct inode *inode = dentry->d_inode;
2308 struct btrfs_root *root = BTRFS_I(dir)->root;
2309 struct btrfs_trans_handle *trans;
2310 unsigned long nr = 0;
2313 * the FIRST_FREE_OBJECTID check makes sure we don't try to rmdir
2314 * the root of a subvolume or snapshot
2316 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2317 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
2321 trans = btrfs_start_transaction(root, 1);
2322 btrfs_set_trans_block_group(trans, dir);
2324 err = btrfs_orphan_add(trans, inode);
2328 /* now the directory is empty */
2329 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2330 dentry->d_name.name, dentry->d_name.len);
2332 btrfs_i_size_write(inode, 0);
2335 nr = trans->blocks_used;
2336 ret = btrfs_end_transaction_throttle(trans, root);
2337 btrfs_btree_balance_dirty(root, nr);
2346 * when truncating bytes in a file, it is possible to avoid reading
2347 * the leaves that contain only checksum items. This can be the
2348 * majority of the IO required to delete a large file, but it must
2349 * be done carefully.
2351 * The keys in the level just above the leaves are checked to make sure
2352 * the lowest key in a given leaf is a csum key, and starts at an offset
2353 * after the new size.
2355 * Then the key for the next leaf is checked to make sure it also has
2356 * a checksum item for the same file. If it does, we know our target leaf
2357 * contains only checksum items, and it can be safely freed without reading them.
2360 * This is just an optimization targeted at large files. It may do
2361 * nothing. It will return 0 unless things went badly.
2363 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2364 struct btrfs_root *root,
2365 struct btrfs_path *path,
2366 struct inode *inode, u64 new_size)
2368 struct btrfs_key key;
2371 struct btrfs_key found_key;
2372 struct btrfs_key other_key;
2373 struct btrfs_leaf_ref *ref;
2377 path->lowest_level = 1;
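/* stop the search at level 1, the node just above the leaves, so whole
 * leaves can be dropped by block pointer without ever reading them in */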
2378 key.objectid = inode->i_ino;
2379 key.type = BTRFS_CSUM_ITEM_KEY;
2380 key.offset = new_size;
2382 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2386 if (path->nodes[1] == NULL) {
2391 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2392 nritems = btrfs_header_nritems(path->nodes[1]);
2397 if (path->slots[1] >= nritems)
2400 /* did we find a key greater than anything we want to delete? */
2401 if (found_key.objectid > inode->i_ino ||
2402 (found_key.objectid == inode->i_ino && found_key.type > key.type))
2405 /* we check the next key in the node to make sure the leaf contains
2406 * only checksum items. This comparison doesn't work if our
2407 * leaf is the last one in the node
2409 if (path->slots[1] + 1 >= nritems) {
2411 /* search forward from the last key in the node, this
2412 * will bring us into the next node in the tree
2414 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2416 /* unlikely, but we inc below, so check to be safe */
2417 if (found_key.offset == (u64)-1)
2420 /* search_forward needs a path with locks held, do the
2421 * search again for the original key. It is possible
2422 * this will race with a balance and return a path that
2423 * we could modify, but this drop is just an optimization
2424 * and is allowed to miss some leaves.
2426 btrfs_release_path(root, path);
2429 /* setup a max key for search_forward */
2430 other_key.offset = (u64)-1;
2431 other_key.type = key.type;
2432 other_key.objectid = key.objectid;
2434 path->keep_locks = 1;
2435 ret = btrfs_search_forward(root, &found_key, &other_key,
2437 path->keep_locks = 0;
2438 if (ret || found_key.objectid != key.objectid ||
2439 found_key.type != key.type) {
2444 key.offset = found_key.offset;
2445 btrfs_release_path(root, path);
2450 /* we know there's one more slot after us in the tree,
2451 * read that key so we can verify it is also a checksum item
2453 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2455 if (found_key.objectid < inode->i_ino)
2458 if (found_key.type != key.type || found_key.offset < new_size)
2462 * if the key for the next leaf isn't a csum key from this objectid,
2463 * we can't be sure there aren't good items inside this leaf.
2466 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2469 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2470 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2472 * it is safe to delete this leaf, it contains only
2473 * csum items from this inode at an offset >= new_size
2475 ret = btrfs_del_leaf(trans, root, path, leaf_start);
2478 if (root->ref_cows && leaf_gen < trans->transid) {
2479 ref = btrfs_alloc_leaf_ref(root, 0);
2481 ref->root_gen = root->root_key.offset;
2482 ref->bytenr = leaf_start;
2484 ref->generation = leaf_gen;
2487 btrfs_sort_leaf_ref(ref);
2489 ret = btrfs_add_leaf_ref(root, ref, 0);
2491 btrfs_free_leaf_ref(root, ref);
2497 btrfs_release_path(root, path);
2499 if (other_key.objectid == inode->i_ino &&
2500 other_key.type == key.type && other_key.offset > key.offset) {
2501 key.offset = other_key.offset;
2507 /* fixup any changes we've made to the path */
2508 path->lowest_level = 0;
2509 path->keep_locks = 0;
2510 btrfs_release_path(root, path);
2517 * this can truncate away extent items, csum items and directory items.
2518 * It starts at a high offset and removes keys until it can't find
2519 * any higher than new_size
2521 * csum items that cross the new i_size are truncated to the new size
2524 * min_type is the minimum key type to truncate down to. If set to 0, this
2525 * will kill all the items on this inode, including the INODE_ITEM_KEY.
2527 noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2528 struct btrfs_root *root,
2529 struct inode *inode,
2530 u64 new_size, u32 min_type)
2533 struct btrfs_path *path;
2534 struct btrfs_key key;
2535 struct btrfs_key found_key;
2536 u32 found_type = (u8)-1;
2537 struct extent_buffer *leaf;
2538 struct btrfs_file_extent_item *fi;
2539 u64 extent_start = 0;
2540 u64 extent_num_bytes = 0;
2546 int pending_del_nr = 0;
2547 int pending_del_slot = 0;
2548 int extent_type = -1;
2550 u64 mask = root->sectorsize - 1;
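/* cached extent mappings past the new size have to be invalidated before
 * the file extent items backing them are deleted below */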
2553 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2554 path = btrfs_alloc_path();
2558 /* FIXME, add redo link to tree so we don't leak on crash */
2559 key.objectid = inode->i_ino;
2560 key.offset = (u64)-1;
2564 path->leave_spinning = 1;
2565 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2570 /* there are no items in the tree for us to truncate, we're done */
2573 if (path->slots[0] == 0) {
2582 leaf = path->nodes[0];
2583 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2584 found_type = btrfs_key_type(&found_key);
2587 if (found_key.objectid != inode->i_ino)
2590 if (found_type < min_type)
2593 item_end = found_key.offset;
2594 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2595 fi = btrfs_item_ptr(leaf, path->slots[0],
2596 struct btrfs_file_extent_item);
2597 extent_type = btrfs_file_extent_type(leaf, fi);
2598 encoding = btrfs_file_extent_compression(leaf, fi);
2599 encoding |= btrfs_file_extent_encryption(leaf, fi);
2600 encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2602 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2604 btrfs_file_extent_num_bytes(leaf, fi);
2605 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2606 item_end += btrfs_file_extent_inline_len(leaf,
2611 if (item_end < new_size) {
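/* this item ends before the new size, so nothing of the current key type
 * is left to remove; step down to the next lower key type and keep
 * searching */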
2612 if (found_type == BTRFS_DIR_ITEM_KEY)
2613 found_type = BTRFS_INODE_ITEM_KEY;
2614 else if (found_type == BTRFS_EXTENT_ITEM_KEY)
2615 found_type = BTRFS_EXTENT_DATA_KEY;
2616 else if (found_type == BTRFS_EXTENT_DATA_KEY)
2617 found_type = BTRFS_XATTR_ITEM_KEY;
2618 else if (found_type == BTRFS_XATTR_ITEM_KEY)
2619 found_type = BTRFS_INODE_REF_KEY;
2620 else if (found_type)
2624 btrfs_set_key_type(&key, found_type);
2627 if (found_key.offset >= new_size)
2633 /* FIXME, shrink the extent if the ref count is only 1 */
2634 if (found_type != BTRFS_EXTENT_DATA_KEY)
2637 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2639 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2640 if (!del_item && !encoding) {
2641 u64 orig_num_bytes =
2642 btrfs_file_extent_num_bytes(leaf, fi);
2643 extent_num_bytes = new_size -
2644 found_key.offset + root->sectorsize - 1;
2645 extent_num_bytes = extent_num_bytes &
2646 ~((u64)root->sectorsize - 1);
2647 btrfs_set_file_extent_num_bytes(leaf, fi,
2649 num_dec = (orig_num_bytes -
2651 if (root->ref_cows && extent_start != 0)
2652 inode_sub_bytes(inode, num_dec);
2653 btrfs_mark_buffer_dirty(leaf);
2656 btrfs_file_extent_disk_num_bytes(leaf,
2658 /* FIXME blocksize != 4096 */
2659 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2660 if (extent_start != 0) {
2663 inode_sub_bytes(inode, num_dec);
2665 root_gen = btrfs_header_generation(leaf);
2666 root_owner = btrfs_header_owner(leaf);
2668 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2670 * we can't truncate inline items that have had
2674 btrfs_file_extent_compression(leaf, fi) == 0 &&
2675 btrfs_file_extent_encryption(leaf, fi) == 0 &&
2676 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2677 u32 size = new_size - found_key.offset;
2679 if (root->ref_cows) {
2680 inode_sub_bytes(inode, item_end + 1 -
2684 btrfs_file_extent_calc_inline_size(size);
2685 ret = btrfs_truncate_item(trans, root, path,
2688 } else if (root->ref_cows) {
2689 inode_sub_bytes(inode, item_end + 1 -
2695 if (!pending_del_nr) {
2696 /* no pending yet, add ourselves */
2697 pending_del_slot = path->slots[0];
2699 } else if (pending_del_nr &&
2700 path->slots[0] + 1 == pending_del_slot) {
2701 /* hop on the pending chunk */
2703 pending_del_slot = path->slots[0];
2711 btrfs_set_path_blocking(path);
2712 ret = btrfs_free_extent(trans, root, extent_start,
2714 leaf->start, root_owner,
2715 root_gen, inode->i_ino, 0);
2719 if (path->slots[0] == 0) {
2722 btrfs_release_path(root, path);
2723 if (found_type == BTRFS_INODE_ITEM_KEY)
2729 if (pending_del_nr &&
2730 path->slots[0] + 1 != pending_del_slot) {
2731 struct btrfs_key debug;
2733 btrfs_item_key_to_cpu(path->nodes[0], &debug,
2735 ret = btrfs_del_items(trans, root, path,
2740 btrfs_release_path(root, path);
2741 if (found_type == BTRFS_INODE_ITEM_KEY)
2748 if (pending_del_nr) {
2749 ret = btrfs_del_items(trans, root, path, pending_del_slot,
2752 btrfs_free_path(path);
2753 inode->i_sb->s_dirt = 1;
2758 * taken from block_truncate_page, but does cow as it zeros out
2759 * any bytes left in the last page in the file.
2761 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
2763 struct inode *inode = mapping->host;
2764 struct btrfs_root *root = BTRFS_I(inode)->root;
2765 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2766 struct btrfs_ordered_extent *ordered;
2768 u32 blocksize = root->sectorsize;
2769 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2770 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2776 if ((offset & (blocksize - 1)) == 0)
2781 page = grab_cache_page(mapping, index);
2785 page_start = page_offset(page);
2786 page_end = page_start + PAGE_CACHE_SIZE - 1;
2788 if (!PageUptodate(page)) {
2789 ret = btrfs_readpage(NULL, page);
2791 if (page->mapping != mapping) {
2793 page_cache_release(page);
2796 if (!PageUptodate(page)) {
2801 wait_on_page_writeback(page);
2803 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2804 set_page_extent_mapped(page);
2806 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2808 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2810 page_cache_release(page);
2811 btrfs_start_ordered_extent(inode, ordered, 1);
2812 btrfs_put_ordered_extent(ordered);
2816 btrfs_set_extent_delalloc(inode, page_start, page_end);
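/* the range is now marked delalloc, so the zeroed tail gets allocated and
 * written back through the normal COW path */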
2818 if (offset != PAGE_CACHE_SIZE) {
2820 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2821 flush_dcache_page(page);
2824 ClearPageChecked(page);
2825 set_page_dirty(page);
2826 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2830 page_cache_release(page);
2835 int btrfs_cont_expand(struct inode *inode, loff_t size)
2837 struct btrfs_trans_handle *trans;
2838 struct btrfs_root *root = BTRFS_I(inode)->root;
2839 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2840 struct extent_map *em;
2841 u64 mask = root->sectorsize - 1;
2842 u64 hole_start = (inode->i_size + mask) & ~mask;
2843 u64 block_end = (size + mask) & ~mask;
2849 if (size <= hole_start)
2852 err = btrfs_check_metadata_free_space(root);
2856 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2859 struct btrfs_ordered_extent *ordered;
2860 btrfs_wait_ordered_range(inode, hole_start,
2861 block_end - hole_start);
2862 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2863 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
2866 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2867 btrfs_put_ordered_extent(ordered);
2870 trans = btrfs_start_transaction(root, 1);
2871 btrfs_set_trans_block_group(trans, inode);
2873 cur_offset = hole_start;
2875 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2876 block_end - cur_offset, 0);
2877 BUG_ON(IS_ERR(em) || !em);
2878 last_byte = min(extent_map_end(em), block_end);
2879 last_byte = (last_byte + mask) & ~mask;
2880 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
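/* a vacancy means no file extent item covers this range at all; drop any
 * overlapping items and insert an explicit hole extent (disk bytenr 0) so
 * the expanded area reads back as zeros */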
2882 hole_size = last_byte - cur_offset;
2883 err = btrfs_drop_extents(trans, root, inode,
2885 cur_offset + hole_size,
2887 cur_offset, &hint_byte);
2890 err = btrfs_insert_file_extent(trans, root,
2891 inode->i_ino, cur_offset, 0,
2892 0, hole_size, 0, hole_size,
2894 btrfs_drop_extent_cache(inode, hole_start,
2897 free_extent_map(em);
2898 cur_offset = last_byte;
2899 if (err || cur_offset >= block_end)
2903 btrfs_end_transaction(trans, root);
2904 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2908 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
2910 struct inode *inode = dentry->d_inode;
2913 err = inode_change_ok(inode, attr);
2917 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
2918 if (attr->ia_size > inode->i_size) {
2919 err = btrfs_cont_expand(inode, attr->ia_size);
2922 } else if (inode->i_size > 0 &&
2923 attr->ia_size == 0) {
2925 /* we're truncating a file that used to have good
2926 * data down to zero. Make sure it gets into
2927 * the ordered flush list so that any new writes
2928 * get down to disk quickly.
2930 BTRFS_I(inode)->ordered_data_close = 1;
2934 err = inode_setattr(inode, attr);
2936 if (!err && ((attr->ia_valid & ATTR_MODE)))
2937 err = btrfs_acl_chmod(inode);
2941 void btrfs_delete_inode(struct inode *inode)
2943 struct btrfs_trans_handle *trans;
2944 struct btrfs_root *root = BTRFS_I(inode)->root;
2948 truncate_inode_pages(&inode->i_data, 0);
2949 if (is_bad_inode(inode)) {
2950 btrfs_orphan_del(NULL, inode);
2953 btrfs_wait_ordered_range(inode, 0, (u64)-1);
2955 btrfs_i_size_write(inode, 0);
2956 trans = btrfs_join_transaction(root, 1);
2958 btrfs_set_trans_block_group(trans, inode);
2959 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
2961 btrfs_orphan_del(NULL, inode);
2962 goto no_delete_lock;
2965 btrfs_orphan_del(trans, inode);
2967 nr = trans->blocks_used;
2970 btrfs_end_transaction(trans, root);
2971 btrfs_btree_balance_dirty(root, nr);
2975 nr = trans->blocks_used;
2976 btrfs_end_transaction(trans, root);
2977 btrfs_btree_balance_dirty(root, nr);
2983 * this returns the key found in the dir entry in the location pointer.
2984 * If no dir entries were found, location->objectid is 0.
2986 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
2987 struct btrfs_key *location)
2989 const char *name = dentry->d_name.name;
2990 int namelen = dentry->d_name.len;
2991 struct btrfs_dir_item *di;
2992 struct btrfs_path *path;
2993 struct btrfs_root *root = BTRFS_I(dir)->root;
2996 path = btrfs_alloc_path();
2999 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
3004 if (!di || IS_ERR(di))
3007 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3009 btrfs_free_path(path);
3012 location->objectid = 0;
3017 * when we hit a tree root in a directory, the btrfs part of the inode
3018 * needs to be changed to reflect the root directory of the tree root. This
3019 * is kind of like crossing a mount point.
3021 static int fixup_tree_root_location(struct btrfs_root *root,
3022 struct btrfs_key *location,
3023 struct btrfs_root **sub_root,
3024 struct dentry *dentry)
3026 struct btrfs_root_item *ri;
3028 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
3030 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
3033 *sub_root = btrfs_read_fs_root(root->fs_info, location,
3034 dentry->d_name.name,
3035 dentry->d_name.len);
3036 if (IS_ERR(*sub_root))
3037 return PTR_ERR(*sub_root);
3039 ri = &(*sub_root)->root_item;
3040 location->objectid = btrfs_root_dirid(ri);
3041 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3042 location->offset = 0;
3047 static noinline void init_btrfs_i(struct inode *inode)
3049 struct btrfs_inode *bi = BTRFS_I(inode);
3052 bi->i_default_acl = NULL;
3057 bi->logged_trans = 0;
3058 bi->delalloc_bytes = 0;
3059 bi->reserved_bytes = 0;
3060 bi->disk_i_size = 0;
3062 bi->index_cnt = (u64)-1;
3063 bi->last_unlink_trans = 0;
3064 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3065 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3066 inode->i_mapping, GFP_NOFS);
3067 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3068 inode->i_mapping, GFP_NOFS);
3069 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3070 INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3071 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3072 mutex_init(&BTRFS_I(inode)->extent_mutex);
3073 mutex_init(&BTRFS_I(inode)->log_mutex);
3076 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3078 struct btrfs_iget_args *args = p;
3079 inode->i_ino = args->ino;
3080 init_btrfs_i(inode);
3081 BTRFS_I(inode)->root = args->root;
3082 btrfs_set_inode_space_info(args->root, inode);
3086 static int btrfs_find_actor(struct inode *inode, void *opaque)
3088 struct btrfs_iget_args *args = opaque;
3089 return args->ino == inode->i_ino &&
3090 args->root == BTRFS_I(inode)->root;
3093 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
3094 struct btrfs_root *root, int wait)
3096 struct inode *inode;
3097 struct btrfs_iget_args args;
3098 args.ino = objectid;
3102 inode = ilookup5(s, objectid, btrfs_find_actor,
3105 inode = ilookup5_nowait(s, objectid, btrfs_find_actor,
3111 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
3112 struct btrfs_root *root)
3114 struct inode *inode;
3115 struct btrfs_iget_args args;
3116 args.ino = objectid;
3119 inode = iget5_locked(s, objectid, btrfs_find_actor,
3120 btrfs_init_locked_inode,
3125 /* Get an inode object given its location and corresponding root.
3126 * Returns in *is_new if the inode was read from disk
3128 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3129 struct btrfs_root *root, int *is_new)
3131 struct inode *inode;
3133 inode = btrfs_iget_locked(s, location->objectid, root);
3135 return ERR_PTR(-EACCES);
3137 if (inode->i_state & I_NEW) {
3138 BTRFS_I(inode)->root = root;
3139 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3140 btrfs_read_locked_inode(inode);
3141 unlock_new_inode(inode);
3152 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3154 struct inode *inode;
3155 struct btrfs_inode *bi = BTRFS_I(dir);
3156 struct btrfs_root *root = bi->root;
3157 struct btrfs_root *sub_root = root;
3158 struct btrfs_key location;
3161 if (dentry->d_name.len > BTRFS_NAME_LEN)
3162 return ERR_PTR(-ENAMETOOLONG);
3164 ret = btrfs_inode_by_name(dir, dentry, &location);
3167 return ERR_PTR(ret);
3170 if (location.objectid) {
3171 ret = fixup_tree_root_location(root, &location, &sub_root,
3174 return ERR_PTR(ret);
3176 return ERR_PTR(-ENOENT);
3177 inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
3179 return ERR_CAST(inode);
3184 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3185 struct nameidata *nd)
3187 struct inode *inode;
3189 if (dentry->d_name.len > BTRFS_NAME_LEN)
3190 return ERR_PTR(-ENAMETOOLONG);
3192 inode = btrfs_lookup_dentry(dir, dentry);
3194 return ERR_CAST(inode);
3196 return d_splice_alias(inode, dentry);
3199 static unsigned char btrfs_filetype_table[] = {
3200 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
3203 static int btrfs_real_readdir(struct file *filp, void *dirent,
3206 struct inode *inode = filp->f_dentry->d_inode;
3207 struct btrfs_root *root = BTRFS_I(inode)->root;
3208 struct btrfs_item *item;
3209 struct btrfs_dir_item *di;
3210 struct btrfs_key key;
3211 struct btrfs_key found_key;
3212 struct btrfs_path *path;
3215 struct extent_buffer *leaf;
3218 unsigned char d_type;
3223 int key_type = BTRFS_DIR_INDEX_KEY;
3228 /* FIXME, use a real flag for deciding about the key type */
3229 if (root->fs_info->tree_root == root)
3230 key_type = BTRFS_DIR_ITEM_KEY;
3232 /* special case for "." */
3233 if (filp->f_pos == 0) {
3234 over = filldir(dirent, ".", 1,
3241 /* special case for .., just use the back ref */
3242 if (filp->f_pos == 1) {
3243 u64 pino = parent_ino(filp->f_path.dentry);
3244 over = filldir(dirent, "..", 2,
3250 path = btrfs_alloc_path();
3253 btrfs_set_key_type(&key, key_type);
3254 key.offset = filp->f_pos;
3255 key.objectid = inode->i_ino;
3257 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3263 leaf = path->nodes[0];
3264 nritems = btrfs_header_nritems(leaf);
3265 slot = path->slots[0];
3266 if (advance || slot >= nritems) {
3267 if (slot >= nritems - 1) {
3268 ret = btrfs_next_leaf(root, path);
3271 leaf = path->nodes[0];
3272 nritems = btrfs_header_nritems(leaf);
3273 slot = path->slots[0];
3281 item = btrfs_item_nr(leaf, slot);
3282 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3284 if (found_key.objectid != key.objectid)
3286 if (btrfs_key_type(&found_key) != key_type)
3288 if (found_key.offset < filp->f_pos)
3291 filp->f_pos = found_key.offset;
3293 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3295 di_total = btrfs_item_size(leaf, item);
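/* a DIR_ITEM can pack several entries whose names hash to the same value;
 * DIR_INDEX items hold a single entry, but the walk below handles both */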
3297 while (di_cur < di_total) {
3298 struct btrfs_key location;
3300 name_len = btrfs_dir_name_len(leaf, di);
3301 if (name_len <= sizeof(tmp_name)) {
3302 name_ptr = tmp_name;
3304 name_ptr = kmalloc(name_len, GFP_NOFS);
3310 read_extent_buffer(leaf, name_ptr,
3311 (unsigned long)(di + 1), name_len);
3313 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3314 btrfs_dir_item_key_to_cpu(leaf, di, &location);
3316 /* is this a reference to our own snapshot? If so
3319 if (location.type == BTRFS_ROOT_ITEM_KEY &&
3320 location.objectid == root->root_key.objectid) {
3324 over = filldir(dirent, name_ptr, name_len,
3325 found_key.offset, location.objectid,
3329 if (name_ptr != tmp_name)
3334 di_len = btrfs_dir_name_len(leaf, di) +
3335 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3337 di = (struct btrfs_dir_item *)((char *)di + di_len);
3341 /* Reached end of directory/root. Bump pos past the last item. */
3342 if (key_type == BTRFS_DIR_INDEX_KEY)
3343 filp->f_pos = INT_LIMIT(off_t);
3349 btrfs_free_path(path);
3353 int btrfs_write_inode(struct inode *inode, int wait)
3355 struct btrfs_root *root = BTRFS_I(inode)->root;
3356 struct btrfs_trans_handle *trans;
3359 if (root->fs_info->btree_inode == inode)
3363 trans = btrfs_join_transaction(root, 1);
3364 btrfs_set_trans_block_group(trans, inode);
3365 ret = btrfs_commit_transaction(trans, root);
3371 * This is somewhat expensive, updating the tree every time the
3372 * inode changes. But, it is most likely to find the inode in cache.
3373 * FIXME, needs more benchmarking...there are no reasons other than performance
3374 * to keep or drop this code.
3376 void btrfs_dirty_inode(struct inode *inode)
3378 struct btrfs_root *root = BTRFS_I(inode)->root;
3379 struct btrfs_trans_handle *trans;
3381 trans = btrfs_join_transaction(root, 1);
3382 btrfs_set_trans_block_group(trans, inode);
3383 btrfs_update_inode(trans, root, inode);
3384 btrfs_end_transaction(trans, root);
3388 * find the highest existing sequence number in a directory
3389 * and then set the in-memory index_cnt variable to the next
3390 * free sequence number
3392 static int btrfs_set_inode_index_count(struct inode *inode)
3394 struct btrfs_root *root = BTRFS_I(inode)->root;
3395 struct btrfs_key key, found_key;
3396 struct btrfs_path *path;
3397 struct extent_buffer *leaf;
3400 key.objectid = inode->i_ino;
3401 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
3402 key.offset = (u64)-1;
3404 path = btrfs_alloc_path();
3408 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3411 /* FIXME: we should be able to handle this */
3417 * MAGIC NUMBER EXPLANATION:
3418 * since we search a directory based on f_pos we have to start at 2
3419 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
3420 * else has to start at 2
3422 if (path->slots[0] == 0) {
3423 BTRFS_I(inode)->index_cnt = 2;
3429 leaf = path->nodes[0];
3430 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3432 if (found_key.objectid != inode->i_ino ||
3433 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
3434 BTRFS_I(inode)->index_cnt = 2;
3438 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
3440 btrfs_free_path(path);
3445 * helper to find a free sequence number in a given directory. This current
3446 * code is very simple, later versions will do smarter things in the btree
3448 int btrfs_set_inode_index(struct inode *dir, u64 *index)
3452 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
3453 ret = btrfs_set_inode_index_count(dir);
3458 *index = BTRFS_I(dir)->index_cnt;
3459 BTRFS_I(dir)->index_cnt++;
3464 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3465 struct btrfs_root *root,
3467 const char *name, int name_len,
3468 u64 ref_objectid, u64 objectid,
3469 u64 alloc_hint, int mode, u64 *index)
3471 struct inode *inode;
3472 struct btrfs_inode_item *inode_item;
3473 struct btrfs_key *location;
3474 struct btrfs_path *path;
3475 struct btrfs_inode_ref *ref;
3476 struct btrfs_key key[2];
3482 path = btrfs_alloc_path();
3485 inode = new_inode(root->fs_info->sb);
3487 return ERR_PTR(-ENOMEM);
3490 ret = btrfs_set_inode_index(dir, index);
3493 return ERR_PTR(ret);
3497 * index_cnt is ignored for everything but a dir,
3498 * btrfs_set_inode_index_count has an explanation for the magic
3501 init_btrfs_i(inode);
3502 BTRFS_I(inode)->index_cnt = 2;
3503 BTRFS_I(inode)->root = root;
3504 BTRFS_I(inode)->generation = trans->transid;
3505 btrfs_set_inode_space_info(root, inode);
3511 BTRFS_I(inode)->block_group =
3512 btrfs_find_block_group(root, 0, alloc_hint, owner);
3513 if ((mode & S_IFREG)) {
3514 if (btrfs_test_opt(root, NODATASUM))
3515 btrfs_set_flag(inode, NODATASUM);
3516 if (btrfs_test_opt(root, NODATACOW))
3517 btrfs_set_flag(inode, NODATACOW);
3520 key[0].objectid = objectid;
3521 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
3524 key[1].objectid = objectid;
3525 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
3526 key[1].offset = ref_objectid;
3528 sizes[0] = sizeof(struct btrfs_inode_item);
3529 sizes[1] = name_len + sizeof(*ref);
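/* insert the inode item and its INODE_REF back reference in one batch so
 * both land next to each other in the same leaf */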
3531 path->leave_spinning = 1;
3532 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
3536 if (objectid > root->highest_inode)
3537 root->highest_inode = objectid;
3539 inode->i_uid = current_fsuid();
3541 if (dir && (dir->i_mode & S_ISGID)) {
3542 inode->i_gid = dir->i_gid;
3546 inode->i_gid = current_fsgid();
3548 inode->i_mode = mode;
3549 inode->i_ino = objectid;
3550 inode_set_bytes(inode, 0);
3551 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3552 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3553 struct btrfs_inode_item);
3554 fill_inode_item(trans, path->nodes[0], inode_item, inode);
3556 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3557 struct btrfs_inode_ref);
3558 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
3559 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
3560 ptr = (unsigned long)(ref + 1);
3561 write_extent_buffer(path->nodes[0], name, ptr, name_len);
3563 btrfs_mark_buffer_dirty(path->nodes[0]);
3564 btrfs_free_path(path);
3566 location = &BTRFS_I(inode)->location;
3567 location->objectid = objectid;
3568 location->offset = 0;
3569 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3571 insert_inode_hash(inode);
3575 BTRFS_I(dir)->index_cnt--;
3576 btrfs_free_path(path);
3578 return ERR_PTR(ret);
3581 static inline u8 btrfs_inode_type(struct inode *inode)
3583 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
3587 * utility function to add 'inode' into 'parent_inode' with
3588 * a given name and a given sequence number.
3589 * if 'add_backref' is true, also insert a backref from the
3590 * inode to the parent directory.
3592 int btrfs_add_link(struct btrfs_trans_handle *trans,
3593 struct inode *parent_inode, struct inode *inode,
3594 const char *name, int name_len, int add_backref, u64 index)
3597 struct btrfs_key key;
3598 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
3600 key.objectid = inode->i_ino;
3601 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
3604 ret = btrfs_insert_dir_item(trans, root, name, name_len,
3605 parent_inode->i_ino,
3606 &key, btrfs_inode_type(inode),
3610 ret = btrfs_insert_inode_ref(trans, root,
3613 parent_inode->i_ino,
3616 btrfs_i_size_write(parent_inode, parent_inode->i_size +
3618 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
3619 ret = btrfs_update_inode(trans, root, parent_inode);
3624 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
3625 struct dentry *dentry, struct inode *inode,
3626 int backref, u64 index)
3628 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3629 inode, dentry->d_name.name,
3630 dentry->d_name.len, backref, index);
3632 d_instantiate(dentry, inode);
3640 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3641 int mode, dev_t rdev)
3643 struct btrfs_trans_handle *trans;
3644 struct btrfs_root *root = BTRFS_I(dir)->root;
3645 struct inode *inode = NULL;
3649 unsigned long nr = 0;
3652 if (!new_valid_dev(rdev))
3655 err = btrfs_check_metadata_free_space(root);
3659 trans = btrfs_start_transaction(root, 1);
3660 btrfs_set_trans_block_group(trans, dir);
3662 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3668 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3670 dentry->d_parent->d_inode->i_ino, objectid,
3671 BTRFS_I(dir)->block_group, mode, &index);
3672 err = PTR_ERR(inode);
3676 err = btrfs_init_inode_security(inode, dir);
3682 btrfs_set_trans_block_group(trans, inode);
3683 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3687 inode->i_op = &btrfs_special_inode_operations;
3688 init_special_inode(inode, inode->i_mode, rdev);
3689 btrfs_update_inode(trans, root, inode);
3691 dir->i_sb->s_dirt = 1;
3692 btrfs_update_inode_block_group(trans, inode);
3693 btrfs_update_inode_block_group(trans, dir);
3695 nr = trans->blocks_used;
3696 btrfs_end_transaction_throttle(trans, root);
3699 inode_dec_link_count(inode);
3702 btrfs_btree_balance_dirty(root, nr);
3706 static int btrfs_create(struct inode *dir, struct dentry *dentry,
3707 int mode, struct nameidata *nd)
3709 struct btrfs_trans_handle *trans;
3710 struct btrfs_root *root = BTRFS_I(dir)->root;
3711 struct inode *inode = NULL;
3714 unsigned long nr = 0;
3718 err = btrfs_check_metadata_free_space(root);
3721 trans = btrfs_start_transaction(root, 1);
3722 btrfs_set_trans_block_group(trans, dir);
3724 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3730 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3732 dentry->d_parent->d_inode->i_ino,
3733 objectid, BTRFS_I(dir)->block_group, mode,
3735 err = PTR_ERR(inode);
3739 err = btrfs_init_inode_security(inode, dir);
3745 btrfs_set_trans_block_group(trans, inode);
3746 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3750 inode->i_mapping->a_ops = &btrfs_aops;
3751 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3752 inode->i_fop = &btrfs_file_operations;
3753 inode->i_op = &btrfs_file_inode_operations;
3754 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3756 dir->i_sb->s_dirt = 1;
3757 btrfs_update_inode_block_group(trans, inode);
3758 btrfs_update_inode_block_group(trans, dir);
3760 nr = trans->blocks_used;
3761 btrfs_end_transaction_throttle(trans, root);
3764 inode_dec_link_count(inode);
3767 btrfs_btree_balance_dirty(root, nr);
3771 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3772 struct dentry *dentry)
3774 struct btrfs_trans_handle *trans;
3775 struct btrfs_root *root = BTRFS_I(dir)->root;
3776 struct inode *inode = old_dentry->d_inode;
3778 unsigned long nr = 0;
3782 if (inode->i_nlink == 0)
3785 btrfs_inc_nlink(inode);
3786 err = btrfs_check_metadata_free_space(root);
3789 err = btrfs_set_inode_index(dir, &index);
3793 trans = btrfs_start_transaction(root, 1);
3795 btrfs_set_trans_block_group(trans, dir);
3796 atomic_inc(&inode->i_count);
3798 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
3803 dir->i_sb->s_dirt = 1;
3804 btrfs_update_inode_block_group(trans, dir);
3805 err = btrfs_update_inode(trans, root, inode);
3810 nr = trans->blocks_used;
3812 btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
3813 btrfs_end_transaction_throttle(trans, root);
3816 inode_dec_link_count(inode);
3819 btrfs_btree_balance_dirty(root, nr);
3823 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3825 struct inode *inode = NULL;
3826 struct btrfs_trans_handle *trans;
3827 struct btrfs_root *root = BTRFS_I(dir)->root;
3829 int drop_on_err = 0;
3832 unsigned long nr = 1;
3834 err = btrfs_check_metadata_free_space(root);
3838 trans = btrfs_start_transaction(root, 1);
3839 btrfs_set_trans_block_group(trans, dir);
3841 if (IS_ERR(trans)) {
3842 err = PTR_ERR(trans);
3846 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3852 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3854 dentry->d_parent->d_inode->i_ino, objectid,
3855 BTRFS_I(dir)->block_group, S_IFDIR | mode,
3857 if (IS_ERR(inode)) {
3858 err = PTR_ERR(inode);
3864 err = btrfs_init_inode_security(inode, dir);
3868 inode->i_op = &btrfs_dir_inode_operations;
3869 inode->i_fop = &btrfs_dir_file_operations;
3870 btrfs_set_trans_block_group(trans, inode);
3872 btrfs_i_size_write(inode, 0);
3873 err = btrfs_update_inode(trans, root, inode);
3877 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3878 inode, dentry->d_name.name,
3879 dentry->d_name.len, 0, index);
3883 d_instantiate(dentry, inode);
3885 dir->i_sb->s_dirt = 1;
3886 btrfs_update_inode_block_group(trans, inode);
3887 btrfs_update_inode_block_group(trans, dir);
3890 nr = trans->blocks_used;
3891 btrfs_end_transaction_throttle(trans, root);
3896 btrfs_btree_balance_dirty(root, nr);
3900 /* helper for btrfs_get_extent. Given an existing extent in the tree,
3901 * and an extent that you want to insert, deal with overlap and insert
3902 * the new extent into the tree.
3904 static int merge_extent_mapping(struct extent_map_tree *em_tree,
3905 struct extent_map *existing,
3906 struct extent_map *em,
3907 u64 map_start, u64 map_len)
3911 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
3912 start_diff = map_start - em->start;
3913 em->start = map_start;
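/* trim the front of the new mapping so it begins where the uncovered range
 * starts; ordinary (non-compressed) extents shift their disk start and
 * shrink their disk length by the same delta */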
3915 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
3916 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3917 em->block_start += start_diff;
3918 em->block_len -= start_diff;
3920 return add_extent_mapping(em_tree, em);
3923 static noinline int uncompress_inline(struct btrfs_path *path,
3924 struct inode *inode, struct page *page,
3925 size_t pg_offset, u64 extent_offset,
3926 struct btrfs_file_extent_item *item)
3929 struct extent_buffer *leaf = path->nodes[0];
3932 unsigned long inline_size;
3935 WARN_ON(pg_offset != 0);
3936 max_size = btrfs_file_extent_ram_bytes(leaf, item);
3937 inline_size = btrfs_file_extent_inline_item_len(leaf,
3938 btrfs_item_nr(leaf, path->slots[0]));
3939 tmp = kmalloc(inline_size, GFP_NOFS);
3940 ptr = btrfs_file_extent_inline_start(item);
3942 read_extent_buffer(leaf, tmp, ptr, inline_size);
3944 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
3945 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
3946 inline_size, max_size);
3948 char *kaddr = kmap_atomic(page, KM_USER0);
3949 unsigned long copy_size = min_t(u64,
3950 PAGE_CACHE_SIZE - pg_offset,
3951 max_size - extent_offset);
3952 memset(kaddr + pg_offset, 0, copy_size);
3953 kunmap_atomic(kaddr, KM_USER0);
3960 * a bit scary, this does extent mapping from logical file offset to the disk.
3961 * the ugly parts come from merging extents from the disk with the in-ram
3962 * representation. This gets more complex because of the data=ordered code,
3963 * where the in-ram extents might be locked pending data=ordered completion.
3965 * This also copies inline extents directly into the page.
3968 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
3969 size_t pg_offset, u64 start, u64 len,
3975 u64 extent_start = 0;
3977 u64 objectid = inode->i_ino;
3979 struct btrfs_path *path = NULL;
3980 struct btrfs_root *root = BTRFS_I(inode)->root;
3981 struct btrfs_file_extent_item *item;
3982 struct extent_buffer *leaf;
3983 struct btrfs_key found_key;
3984 struct extent_map *em = NULL;
3985 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3986 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3987 struct btrfs_trans_handle *trans = NULL;
3991 spin_lock(&em_tree->lock);
3992 em = lookup_extent_mapping(em_tree, start, len);
3994 em->bdev = root->fs_info->fs_devices->latest_bdev;
3995 spin_unlock(&em_tree->lock);
3998 if (em->start > start || em->start + em->len <= start)
3999 free_extent_map(em);
4000 else if (em->block_start == EXTENT_MAP_INLINE && page)
4001 free_extent_map(em);
4005 em = alloc_extent_map(GFP_NOFS);
4010 em->bdev = root->fs_info->fs_devices->latest_bdev;
4011 em->start = EXTENT_MAP_HOLE;
4012 em->orig_start = EXTENT_MAP_HOLE;
4014 em->block_len = (u64)-1;
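/* start with a mapping that covers everything as a hole; the real start,
 * length and block numbers are filled in below from whatever file extent
 * item the search finds */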
4017 path = btrfs_alloc_path();
4021 ret = btrfs_lookup_file_extent(trans, root, path,
4022 objectid, start, trans != NULL);
4029 if (path->slots[0] == 0)
4034 leaf = path->nodes[0];
4035 item = btrfs_item_ptr(leaf, path->slots[0],
4036 struct btrfs_file_extent_item);
4037 /* are we inside the extent that was found? */
4038 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4039 found_type = btrfs_key_type(&found_key);
4040 if (found_key.objectid != objectid ||
4041 found_type != BTRFS_EXTENT_DATA_KEY) {
4045 found_type = btrfs_file_extent_type(leaf, item);
4046 extent_start = found_key.offset;
4047 compressed = btrfs_file_extent_compression(leaf, item);
4048 if (found_type == BTRFS_FILE_EXTENT_REG ||
4049 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4050 extent_end = extent_start +
4051 btrfs_file_extent_num_bytes(leaf, item);
4052 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4054 size = btrfs_file_extent_inline_len(leaf, item);
4055 extent_end = (extent_start + size + root->sectorsize - 1) &
4056 ~((u64)root->sectorsize - 1);
4059 if (start >= extent_end) {
4061 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4062 ret = btrfs_next_leaf(root, path);
4069 leaf = path->nodes[0];
4071 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4072 if (found_key.objectid != objectid ||
4073 found_key.type != BTRFS_EXTENT_DATA_KEY)
4075 if (start + len <= found_key.offset)
4078 em->len = found_key.offset - start;
4082 if (found_type == BTRFS_FILE_EXTENT_REG ||
4083 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4084 em->start = extent_start;
4085 em->len = extent_end - extent_start;
4086 em->orig_start = extent_start -
4087 btrfs_file_extent_offset(leaf, item);
4088 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4090 em->block_start = EXTENT_MAP_HOLE;
4094 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4095 em->block_start = bytenr;
4096 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4099 bytenr += btrfs_file_extent_offset(leaf, item);
4100 em->block_start = bytenr;
4101 em->block_len = em->len;
4102 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4103 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4106 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4110 size_t extent_offset;
4113 em->block_start = EXTENT_MAP_INLINE;
4114 if (!page || create) {
4115 em->start = extent_start;
4116 em->len = extent_end - extent_start;
4120 size = btrfs_file_extent_inline_len(leaf, item);
4121 extent_offset = page_offset(page) + pg_offset - extent_start;
4122 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4123 size - extent_offset);
4124 em->start = extent_start + extent_offset;
4125 em->len = (copy_size + root->sectorsize - 1) &
4126 ~((u64)root->sectorsize - 1);
4127 em->orig_start = EXTENT_MAP_INLINE;
4129 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4130 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4131 if (create == 0 && !PageUptodate(page)) {
4132 if (btrfs_file_extent_compression(leaf, item) ==
4133 BTRFS_COMPRESS_ZLIB) {
4134 ret = uncompress_inline(path, inode, page,
4136 extent_offset, item);
4140 read_extent_buffer(leaf, map + pg_offset, ptr,
4144 flush_dcache_page(page);
4145 } else if (create && PageUptodate(page)) {
4148 free_extent_map(em);
4150 btrfs_release_path(root, path);
4151 trans = btrfs_join_transaction(root, 1);
4155 write_extent_buffer(leaf, map + pg_offset, ptr,
4158 btrfs_mark_buffer_dirty(leaf);
4160 set_extent_uptodate(io_tree, em->start,
4161 extent_map_end(em) - 1, GFP_NOFS);
4164 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4171 em->block_start = EXTENT_MAP_HOLE;
4172 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4174 btrfs_release_path(root, path);
4175 if (em->start > start || extent_map_end(em) <= start) {
4176 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4177 "[%llu %llu]\n", (unsigned long long)em->start,
4178 (unsigned long long)em->len,
4179 (unsigned long long)start,
4180 (unsigned long long)len);
4186 spin_lock(&em_tree->lock);
4187 ret = add_extent_mapping(em_tree, em);
4188 /* it is possible that someone inserted the extent into the tree
4189 * while we had the lock dropped. It is also possible that
4190 * an overlapping map exists in the tree
4192 if (ret == -EEXIST) {
4193 struct extent_map *existing;
4197 existing = lookup_extent_mapping(em_tree, start, len);
4198 if (existing && (existing->start > start ||
4199 existing->start + existing->len <= start)) {
4200 free_extent_map(existing);
4204 existing = lookup_extent_mapping(em_tree, em->start,
4207 err = merge_extent_mapping(em_tree, existing,
4210 free_extent_map(existing);
4212 free_extent_map(em);
4217 free_extent_map(em);
4221 free_extent_map(em);
4226 spin_unlock(&em_tree->lock);
4229 btrfs_free_path(path);
4231 ret = btrfs_end_transaction(trans, root);
4236 free_extent_map(em);
4238 return ERR_PTR(err);
4243 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4244 const struct iovec *iov, loff_t offset,
4245 unsigned long nr_segs)
4250 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4251 __u64 start, __u64 len)
4253 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
4256 int btrfs_readpage(struct file *file, struct page *page)
4258 struct extent_io_tree *tree;
4259 tree = &BTRFS_I(page->mapping->host)->io_tree;
4260 return extent_read_full_page(tree, page, btrfs_get_extent);
4263 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4265 struct extent_io_tree *tree;
4268 if (current->flags & PF_MEMALLOC) {
4269 redirty_page_for_writepage(wbc, page);
4273 tree = &BTRFS_I(page->mapping->host)->io_tree;
4274 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4277 int btrfs_writepages(struct address_space *mapping,
4278 struct writeback_control *wbc)
4280 struct extent_io_tree *tree;
4282 tree = &BTRFS_I(mapping->host)->io_tree;
4283 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4287 btrfs_readpages(struct file *file, struct address_space *mapping,
4288 struct list_head *pages, unsigned nr_pages)
4290 struct extent_io_tree *tree;
4291 tree = &BTRFS_I(mapping->host)->io_tree;
4292 return extent_readpages(tree, mapping, pages, nr_pages,
4295 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4297 struct extent_io_tree *tree;
4298 struct extent_map_tree *map;
4301 tree = &BTRFS_I(page->mapping->host)->io_tree;
4302 map = &BTRFS_I(page->mapping->host)->extent_tree;
4303 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4305 ClearPagePrivate(page);
4306 set_page_private(page, 0);
4307 page_cache_release(page);
4312 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4314 if (PageWriteback(page) || PageDirty(page))
4316 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
4319 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4321 struct extent_io_tree *tree;
4322 struct btrfs_ordered_extent *ordered;
4323 u64 page_start = page_offset(page);
4324 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4326 wait_on_page_writeback(page);
4327 tree = &BTRFS_I(page->mapping->host)->io_tree;
4329 btrfs_releasepage(page, GFP_NOFS);
4333 lock_extent(tree, page_start, page_end, GFP_NOFS);
4334 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
4338 * IO on this page will never be started, so we need
4339 * to account for any ordered extents now
4341 clear_extent_bit(tree, page_start, page_end,
4342 EXTENT_DIRTY | EXTENT_DELALLOC |
4343 EXTENT_LOCKED, 1, 0, GFP_NOFS);
4344 btrfs_finish_ordered_io(page->mapping->host,
4345 page_start, page_end);
4346 btrfs_put_ordered_extent(ordered);
4347 lock_extent(tree, page_start, page_end, GFP_NOFS);
4349 clear_extent_bit(tree, page_start, page_end,
4350 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4353 __btrfs_releasepage(page, GFP_NOFS);
4355 ClearPageChecked(page);
4356 if (PagePrivate(page)) {
4357 ClearPagePrivate(page);
4358 set_page_private(page, 0);
4359 page_cache_release(page);
4364 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
4365 * called from a page fault handler when a page is first dirtied. Hence we must
4366 * be careful to check for EOF conditions here. We set the page up correctly
4367 * for a written page which means we get ENOSPC checking when writing into
4368 * holes and correct delalloc and unwritten extent mapping on filesystems that
4369 * support these features.
4371 * We are not allowed to take the i_mutex here so we have to play games to
4372 * protect against truncate races as the page could now be beyond EOF. Because
4373 * vmtruncate() writes the inode size before removing pages, once we have the
4374 * page lock we can determine safely if the page is beyond EOF. If it is not
4375 * beyond EOF, then the page is guaranteed safe against truncation until we
4378 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4380 struct page *page = vmf->page;
4381 struct inode *inode = fdentry(vma->vm_file)->d_inode;
4382 struct btrfs_root *root = BTRFS_I(inode)->root;
4383 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4384 struct btrfs_ordered_extent *ordered;
4386 unsigned long zero_start;
4392 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
4396 else /* -ENOSPC, -EIO, etc */
4397 ret = VM_FAULT_SIGBUS;
4401 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
4404 size = i_size_read(inode);
4405 page_start = page_offset(page);
4406 page_end = page_start + PAGE_CACHE_SIZE - 1;
4408 if ((page->mapping != inode->i_mapping) ||
4409 (page_start >= size)) {
4410 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4411 /* page got truncated out from underneath us */
4414 wait_on_page_writeback(page);
4416 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
4417 set_page_extent_mapped(page);
4420 * we can't set the delalloc bits if there are pending ordered
4421 * extents. Drop our locks and wait for them to finish
4423 ordered = btrfs_lookup_ordered_extent(inode, page_start);
4425 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4427 btrfs_start_ordered_extent(inode, ordered, 1);
4428 btrfs_put_ordered_extent(ordered);
4432 btrfs_set_extent_delalloc(inode, page_start, page_end);
4435 /* page is wholly or partially inside EOF */
4436 if (page_start + PAGE_CACHE_SIZE > size)
4437 zero_start = size & ~PAGE_CACHE_MASK;
4439 zero_start = PAGE_CACHE_SIZE;
4441 if (zero_start != PAGE_CACHE_SIZE) {
4443 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
4444 flush_dcache_page(page);
4447 ClearPageChecked(page);
4448 set_page_dirty(page);
4450 BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
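/* note the inode as modified in the next transaction so a later fsync sees
 * the mmap write and logs the file */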
4451 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4459 static void btrfs_truncate(struct inode *inode)
4461 struct btrfs_root *root = BTRFS_I(inode)->root;
4463 struct btrfs_trans_handle *trans;
4465 u64 mask = root->sectorsize - 1;
4467 if (!S_ISREG(inode->i_mode))
4469 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4472 btrfs_truncate_page(inode->i_mapping, inode->i_size);
4473 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
4475 trans = btrfs_start_transaction(root, 1);
4478 * setattr is responsible for setting the ordered_data_close flag,
4479 * but that is only tested during the last file release. That
4480 * could happen well after the next commit, leaving a great big
4481 * window where new writes may get lost if someone chooses to write
4482 * to this file after truncating to zero
4484 * The inode doesn't have any dirty data here, and so if we commit
4485 * this is a noop. If someone immediately starts writing to the inode
4486 * it is very likely we'll catch some of their writes in this
4487 * transaction, and the commit will find this file on the ordered
4488 * data list with good things to send down.
4490 * This is a best effort solution, there is still a window where
4491 * using truncate to replace the contents of the file will
4492 * end up with a zero length file after a crash.
4494 if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
4495 btrfs_add_ordered_operation(trans, root, inode);
4497 btrfs_set_trans_block_group(trans, inode);
4498 btrfs_i_size_write(inode, inode->i_size);
4500 ret = btrfs_orphan_add(trans, inode);
4503 /* FIXME, add redo link to tree so we don't leak on crash */
4504 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
4505 BTRFS_EXTENT_DATA_KEY);
4506 btrfs_update_inode(trans, root, inode);
4508 ret = btrfs_orphan_del(trans, inode);
4512 nr = trans->blocks_used;
4513 ret = btrfs_end_transaction_throttle(trans, root);
4515 btrfs_btree_balance_dirty(root, nr);
4519 * create a new subvolume directory/inode (helper for the ioctl).
4521 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
4522 struct btrfs_root *new_root, struct dentry *dentry,
4523 u64 new_dirid, u64 alloc_hint)
4525 struct inode *inode;
4529 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
4530 new_dirid, alloc_hint, S_IFDIR | 0700, &index);
4532 return PTR_ERR(inode);
4533 inode->i_op = &btrfs_dir_inode_operations;
4534 inode->i_fop = &btrfs_dir_file_operations;
4537 btrfs_i_size_write(inode, 0);
4539 error = btrfs_update_inode(trans, new_root, inode);
4543 d_instantiate(dentry, inode);
4547 /* helper function for file defrag and space balancing. This
4548 * forces readahead on a given range of bytes in an inode
4550 unsigned long btrfs_force_ra(struct address_space *mapping,
4551 struct file_ra_state *ra, struct file *file,
4552 pgoff_t offset, pgoff_t last_index)
4554 pgoff_t req_size = last_index - offset + 1;
4556 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
4557 return offset + req_size;
4560 struct inode *btrfs_alloc_inode(struct super_block *sb)
4562 struct btrfs_inode *ei;
4564 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
4568 ei->logged_trans = 0;
4569 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
4570 ei->i_acl = BTRFS_ACL_NOT_CACHED;
4571 ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
4572 INIT_LIST_HEAD(&ei->i_orphan);
4573 INIT_LIST_HEAD(&ei->ordered_operations);
4574 return &ei->vfs_inode;
4577 void btrfs_destroy_inode(struct inode *inode)
4579 struct btrfs_ordered_extent *ordered;
4580 struct btrfs_root *root = BTRFS_I(inode)->root;
4582 WARN_ON(!list_empty(&inode->i_dentry));
4583 WARN_ON(inode->i_data.nrpages);
4585 if (BTRFS_I(inode)->i_acl &&
4586 BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
4587 posix_acl_release(BTRFS_I(inode)->i_acl);
4588 if (BTRFS_I(inode)->i_default_acl &&
4589 BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
4590 posix_acl_release(BTRFS_I(inode)->i_default_acl);
4593 * Make sure we're properly removed from the ordered operation
4597 if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
4598 spin_lock(&root->fs_info->ordered_extent_lock);
4599 list_del_init(&BTRFS_I(inode)->ordered_operations);
4600 spin_unlock(&root->fs_info->ordered_extent_lock);
4603 spin_lock(&root->list_lock);
4604 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
4605 printk(KERN_ERR "BTRFS: inode %lu still on the orphan"
4606 " list\n", inode->i_ino);
4609 spin_unlock(&root->list_lock);
4612 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
4616 printk(KERN_ERR "btrfs found ordered "
4617 "extent %llu %llu on inode cleanup\n",
4618 (unsigned long long)ordered->file_offset,
4619 (unsigned long long)ordered->len);
4620 btrfs_remove_ordered_extent(inode, ordered);
4621 btrfs_put_ordered_extent(ordered);
4622 btrfs_put_ordered_extent(ordered);
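/* two puts: one drops the reference taken by the lookup above, the other
 * drops the reference the ordered tree itself was still holding */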
4625 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
4626 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
4629 static void init_once(void *foo)
4631 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
4633 inode_init_once(&ei->vfs_inode);
void btrfs_destroy_cachep(void)
{
	if (btrfs_inode_cachep)
		kmem_cache_destroy(btrfs_inode_cachep);
	if (btrfs_trans_handle_cachep)
		kmem_cache_destroy(btrfs_trans_handle_cachep);
	if (btrfs_transaction_cachep)
		kmem_cache_destroy(btrfs_transaction_cachep);
	if (btrfs_bit_radix_cachep)
		kmem_cache_destroy(btrfs_bit_radix_cachep);
	if (btrfs_path_cachep)
		kmem_cache_destroy(btrfs_path_cachep);
}
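
/*
 * create the slab caches used for inodes, transaction handles, running
 * transactions, paths and the bit radix.  If any allocation fails, tear
 * down whatever was already created and return -ENOMEM; callers are
 * expected to pair this with btrfs_destroy_cachep() at module exit.
 */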
int btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
			sizeof(struct btrfs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
			sizeof(struct btrfs_trans_handle), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_trans_handle_cachep)
		goto fail;

	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
			sizeof(struct btrfs_transaction), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_transaction_cachep)
		goto fail;

	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
			sizeof(struct btrfs_path), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_path_cachep)
		goto fail;

	btrfs_bit_radix_cachep = kmem_cache_create("btrfs_radix", 256, 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD |
			SLAB_DESTROY_BY_RCU, NULL);
	if (!btrfs_bit_radix_cachep)
		goto fail;
	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}
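
/*
 * stat reports the subvolume's anonymous device number and folds delalloc
 * bytes that haven't been written yet into st_blocks, so buffered writes
 * show up in the block count before they reach disk.
 */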
static int btrfs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
	stat->blksize = PAGE_CACHE_SIZE;
	stat->blocks = (inode_get_bytes(inode) +
			BTRFS_I(inode)->delalloc_bytes) >> 9;
	return 0;
}
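
/*
 * rename is restricted to a single subvolume (cross-subvolume renames and
 * renames of a subvolume/snapshot root fail with -EXDEV), and when a large
 * file is being replaced its IO is started early so the transaction commit
 * at the end stays short.
 */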
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			   struct inode *new_dir, struct dentry *new_dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec ctime = CURRENT_TIME;
	u64 index = 0;
	int ret;

	/* we're not allowed to rename between subvolumes */
	if (BTRFS_I(old_inode)->root->root_key.objectid !=
	    BTRFS_I(new_dir)->root->root_key.objectid)
		return -EXDEV;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
		return -ENOTEMPTY;
	}

	/* to rename a snapshot or subvolume, we need to juggle the
	 * backrefs.  This isn't coded yet
	 */
	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
		return -EXDEV;

	ret = btrfs_check_metadata_free_space(root);
	if (ret)
		goto out_unlock;

	/*
	 * we're using rename to replace one file with another, and the
	 * replacement file is large.  Start IO on it now so we don't add
	 * too much work to the end of the transaction
	 */
	if (new_inode && old_inode && S_ISREG(old_inode->i_mode) &&
	    new_inode->i_size &&
	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
		filemap_flush(old_inode->i_mapping);

	trans = btrfs_start_transaction(root, 1);

	/*
	 * make sure the inode gets flushed if it is replacing
	 * something.
	 */
	if (new_inode && new_inode->i_size &&
	    old_inode && S_ISREG(old_inode->i_mode)) {
		btrfs_add_ordered_operation(trans, root, old_inode);
	}

	/*
	 * this is an ugly little race, but the rename is required to make
	 * sure that if we crash, the inode is either at the old name
	 * or the new one.  pinning the log transaction lets us make sure
	 * we don't allow a log commit to come in after we unlink the
	 * name but before we add the new name back in.
	 */
	btrfs_pin_log_trans(root);

	btrfs_set_trans_block_group(trans, new_dir);

	btrfs_inc_nlink(old_dentry->d_inode);
	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	old_inode->i_ctime = ctime;

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);

	ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
				 old_dentry->d_name.name,
				 old_dentry->d_name.len);
	if (ret)
		goto out_fail;

	if (new_inode) {
		new_inode->i_ctime = CURRENT_TIME;
		ret = btrfs_unlink_inode(trans, root, new_dir,
					 new_dentry->d_inode,
					 new_dentry->d_name.name,
					 new_dentry->d_name.len);
		if (ret)
			goto out_fail;
		if (new_inode->i_nlink == 0) {
			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
			if (ret)
				goto out_fail;
		}
	}
	ret = btrfs_set_inode_index(new_dir, &index);
	if (ret)
		goto out_fail;

	ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
			     old_inode, new_dentry->d_name.name,
			     new_dentry->d_name.len, 1, index);
	if (ret)
		goto out_fail;

	btrfs_log_new_name(trans, old_inode, old_dir,
			   new_dentry->d_parent);
out_fail:
	/* this btrfs_end_log_trans just allows the current
	 * log-sub transaction to complete
	 */
	btrfs_end_log_trans(root);
	btrfs_end_transaction_throttle(trans, root);
out_unlock:
	return ret;
}
/*
 * some fairly slow code that needs optimization.  This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
int btrfs_start_delalloc_inodes(struct btrfs_root *root)
{
	struct list_head *head = &root->fs_info->delalloc_inodes;
	struct btrfs_inode *binode;
	struct inode *inode;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	spin_lock(&root->fs_info->delalloc_lock);
	while (!list_empty(head)) {
		binode = list_entry(head->next, struct btrfs_inode,
				    delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (!inode)
			list_del_init(&binode->delalloc_inodes);
		spin_unlock(&root->fs_info->delalloc_lock);
		if (inode) {
			filemap_flush(inode->i_mapping);
			iput(inode);
		}
		cond_resched();
		spin_lock(&root->fs_info->delalloc_lock);
	}
	spin_unlock(&root->fs_info->delalloc_lock);

	/* the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&root->fs_info->async_submit_draining);
	while (atomic_read(&root->fs_info->nr_async_submits) ||
	       atomic_read(&root->fs_info->async_delalloc_pages)) {
		wait_event(root->fs_info->async_submit_wait,
		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&root->fs_info->async_submit_draining);
	return 0;
}
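
/*
 * symlink targets are stored as an inline file extent in the btree rather
 * than in a data block, which is why the target length is capped at
 * BTRFS_MAX_INLINE_DATA_SIZE().
 */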
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;
	unsigned long nr = 0;

	name_len = strlen(symname) + 1;
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;

	err = btrfs_check_metadata_free_space(root);
	if (err)
		goto out_fail;

	trans = btrfs_start_transaction(root, 1);
	btrfs_set_trans_block_group(trans, dir);

	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
	if (err) {
		err = -ENOSPC;
		goto out_unlock;
	}

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len,
				dentry->d_parent->d_inode->i_ino, objectid,
				BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
				&index);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_unlock;

	err = btrfs_init_inode_security(inode, dir);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	btrfs_set_trans_block_group(trans, inode);
	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	dir->i_sb->s_dirt = 1;
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);
	if (drop_inode)
		goto out_unlock;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = inode->i_ino;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(inode, name_len - 1);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		drop_inode = 1;

out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
out_fail:
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}
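
/*
 * carve [start, end) into reserved extents of at most max_extent bytes
 * each and insert them as BTRFS_FILE_EXTENT_PREALLOC items.  i_size is
 * only pushed forward when FALLOC_FL_KEEP_SIZE isn't set.
 */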
static int prealloc_file_range(struct btrfs_trans_handle *trans,
			       struct inode *inode, u64 start, u64 end,
			       u64 locked_end, u64 alloc_hint, int mode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 alloc_size;
	u64 cur_offset = start;
	u64 num_bytes = end - start;
	int ret = 0;

	while (num_bytes > 0) {
		alloc_size = min(num_bytes, root->fs_info->max_extent);
		ret = btrfs_reserve_extent(trans, root, alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		if (ret) {
			WARN_ON(1);
			goto out;
		}
		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, locked_end,
						  0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		BUG_ON(ret);
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		alloc_hint = ins.objectid + ins.offset;
	}
out:
	if (cur_offset > start) {
		inode->i_ctime = CURRENT_TIME;
		btrfs_set_flag(inode, PREALLOC);
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    cur_offset > i_size_read(inode))
			btrfs_i_size_write(inode, cur_offset);
		ret = btrfs_update_inode(trans, root, inode);
		BUG_ON(ret);
	}

	return ret;
}
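
/*
 * preallocation: wait for ordered IO, take i_mutex, then start a
 * transaction and lock the extent range, backing off and re-waiting if an
 * ordered extent is still in flight.  Once the range is quiet, walk the
 * extent maps and fill every hole with a preallocated extent.
 */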
static long btrfs_fallocate(struct inode *inode, int mode,
			    loff_t offset, loff_t len)
{
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	struct btrfs_trans_handle *trans;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, alloc_start);
		if (ret)
			goto out;
	}

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
		if (!trans) {
			ret = -EIO;
			goto out;
		}

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			    GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      alloc_start, locked_end, GFP_NOFS);
			btrfs_end_transaction(trans, BTRFS_I(inode)->root);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), alloc_end);
		last_byte = (last_byte + mask) & ~mask;
		if (em->block_start == EXTENT_MAP_HOLE) {
			ret = prealloc_file_range(trans, inode, cur_offset,
					last_byte, locked_end + 1,
					alloc_hint, mode);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		}
		if (em->block_start <= EXTENT_MAP_LAST_BYTE)
			alloc_hint = em->block_start;
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
		      GFP_NOFS);
	btrfs_end_transaction(trans, BTRFS_I(inode)->root);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}
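
/*
 * the btrfs READONLY inode flag is checked before we fall back to the
 * generic mode bit and ACL checks.
 */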
static int btrfs_permission(struct inode *inode, int mask)
{
	if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
		return -EACCES;
	return generic_permission(inode, mask, btrfs_check_acl);
}
static struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
};
static struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
};
static struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= btrfs_real_readdir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};
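
/*
 * hooks the generic extent_io code uses to call back into btrfs for things
 * like filling delalloc ranges, submitting and merging bios and per-page
 * end_io handling.
 */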
static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.readpage_io_failed_hook = btrfs_io_failed_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
};
/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.sync_page	= block_sync_page,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
};
static struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};
static struct inode_operations btrfs_file_inode_operations = {
	.truncate	= btrfs_truncate,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.fallocate	= btrfs_fallocate,
	.fiemap		= btrfs_fiemap,
};
static struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};
static struct inode_operations btrfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};