/* fs/btrfs/file.c */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
#include "volumes.h"

/*
 * when auto defrag is enabled we queue up these defrag structs
 * to remember which inodes need defragging passes
 */
struct inode_defrag {
        struct rb_node rb_node;
        /* objectid */
        u64 ino;
        /*
         * transid where the defrag was added, we search for
         * extents newer than this
         */
        u64 transid;

        /* root objectid */
        u64 root;

        /* last offset we were able to defrag */
        u64 last_offset;

        /* if we've wrapped around back to zero once already */
        int cycled;
};

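/*
 * Defrag records are ordered by root objectid first and inode number
 * second; this is the ordering the defrag_inodes rbtree relies on.
 */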
static int __compare_inode_defrag(struct inode_defrag *defrag1,
                                  struct inode_defrag *defrag2)
{
        if (defrag1->root > defrag2->root)
                return 1;
        else if (defrag1->root < defrag2->root)
                return -1;
        else if (defrag1->ino > defrag2->ino)
                return 1;
        else if (defrag1->ino < defrag2->ino)
                return -1;
        else
                return 0;
}

/* insert a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found, the defrag item you
 * pass in is freed
 */
static void __btrfs_add_inode_defrag(struct inode *inode,
                                     struct inode_defrag *defrag)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *entry;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        int ret;

        p = &root->fs_info->defrag_inodes.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(defrag, entry);
                if (ret < 0)
                        p = &parent->rb_left;
                else if (ret > 0)
                        p = &parent->rb_right;
                else {
                        /* if we're reinserting an entry for
                         * an old defrag run, make sure to
                         * lower the transid of our existing record
                         */
                        if (defrag->transid < entry->transid)
                                entry->transid = defrag->transid;
                        if (defrag->last_offset > entry->last_offset)
                                entry->last_offset = defrag->last_offset;
                        goto exists;
                }
        }
        set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
        return;

exists:
        kfree(defrag);
        return;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
                           struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *defrag;
        u64 transid;

        if (!btrfs_test_opt(root, AUTO_DEFRAG))
                return 0;

        if (btrfs_fs_closing(root->fs_info))
                return 0;

        if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                return 0;

        if (trans)
                transid = trans->transid;
        else
                transid = BTRFS_I(inode)->root->last_trans;

        defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
        if (!defrag)
                return -ENOMEM;

        defrag->ino = btrfs_ino(inode);
        defrag->transid = transid;
        defrag->root = root->root_key.objectid;

        spin_lock(&root->fs_info->defrag_inodes_lock);
        if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                __btrfs_add_inode_defrag(inode, defrag);
        else
                kfree(defrag);
        spin_unlock(&root->fs_info->defrag_inodes_lock);
        return 0;
}

/*
 * must be called with the defrag_inodes lock held
 */
struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
                                             u64 root, u64 ino,
                                             struct rb_node **next)
{
        struct inode_defrag *entry = NULL;
        struct inode_defrag tmp;
        struct rb_node *p;
        struct rb_node *parent = NULL;
        int ret;

        tmp.ino = ino;
        tmp.root = root;

        p = info->defrag_inodes.rb_node;
        while (p) {
                parent = p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(&tmp, entry);
                if (ret < 0)
                        p = parent->rb_left;
                else if (ret > 0)
                        p = parent->rb_right;
                else
                        return entry;
        }

        if (next) {
                while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
                        parent = rb_next(parent);
                        entry = rb_entry(parent, struct inode_defrag, rb_node);
                }
                *next = parent;
        }
        return NULL;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        struct btrfs_root *inode_root;
        struct inode *inode;
        struct rb_node *n;
        struct btrfs_key key;
        struct btrfs_ioctl_defrag_range_args range;
        u64 first_ino = 0;
        u64 root_objectid = 0;
        int num_defrag;
        int defrag_batch = 1024;

        memset(&range, 0, sizeof(range));
        range.len = (u64)-1;

        atomic_inc(&fs_info->defrag_running);
        spin_lock(&fs_info->defrag_inodes_lock);
        while (1) {
                n = NULL;

                /* find an inode to defrag */
                defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
                                                 first_ino, &n);
                if (!defrag) {
                        if (n) {
                                defrag = rb_entry(n, struct inode_defrag,
                                                  rb_node);
                        } else if (root_objectid || first_ino) {
                                root_objectid = 0;
                                first_ino = 0;
                                continue;
                        } else {
                                break;
                        }
                }

                /* remove it from the rbtree */
                first_ino = defrag->ino + 1;
                root_objectid = defrag->root;
                rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);

                if (btrfs_fs_closing(fs_info))
                        goto next_free;

                spin_unlock(&fs_info->defrag_inodes_lock);

                /* get the inode */
                key.objectid = defrag->root;
                btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
                key.offset = (u64)-1;
                inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
                if (IS_ERR(inode_root))
                        goto next;

                key.objectid = defrag->ino;
                btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
                key.offset = 0;

                inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
                if (IS_ERR(inode))
                        goto next;

                /* do a chunk of defrag */
                clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
                range.start = defrag->last_offset;
                num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
                                               defrag_batch);
                /*
                 * if we filled the whole defrag batch, there
                 * must be more work to do.  Queue this defrag
                 * again
                 */
                if (num_defrag == defrag_batch) {
                        defrag->last_offset = range.start;
                        __btrfs_add_inode_defrag(inode, defrag);
                        /*
                         * we don't want to kfree defrag, we added it back to
                         * the rbtree
                         */
                        defrag = NULL;
                } else if (defrag->last_offset && !defrag->cycled) {
                        /*
                         * we didn't fill our defrag batch, but
                         * we didn't start at zero.  Make sure we loop
                         * around to the start of the file.
                         */
                        defrag->last_offset = 0;
                        defrag->cycled = 1;
                        __btrfs_add_inode_defrag(inode, defrag);
                        defrag = NULL;
                }

                iput(inode);
next:
                spin_lock(&fs_info->defrag_inodes_lock);
next_free:
                kfree(defrag);
        }
        spin_unlock(&fs_info->defrag_inodes_lock);

        atomic_dec(&fs_info->defrag_running);

        /*
         * during unmount, we use the transaction_wait queue to
         * wait for the defragger to stop
         */
        wake_up(&fs_info->transaction_wait);
        return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
                                         size_t write_bytes,
                                         struct page **prepared_pages,
                                         struct iov_iter *i)
{
        size_t copied = 0;
        size_t total_copied = 0;
        int pg = 0;
        int offset = pos & (PAGE_CACHE_SIZE - 1);

        while (write_bytes > 0) {
                size_t count = min_t(size_t,
                                     PAGE_CACHE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
                 *
                 * Disable pagefault to avoid recursive lock since
                 * the pages are already locked
                 */
                pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
                pagefault_enable();

                /* Flush processor's dcache for this page */
                flush_dcache_page(page);

                /*
                 * if we get a partial write, we can end up with
                 * partially up to date pages.  These add
                 * a lot of complexity, so make sure they don't
                 * happen by forcing this copy to be retried.
                 *
                 * The rest of the btrfs_file_write code will fall
                 * back to page at a time copies after we return 0.
                 */
                if (!PageUptodate(page) && copied < count)
                        copied = 0;

                iov_iter_advance(i, copied);
                write_bytes -= copied;
                total_copied += copied;

                /* Return to btrfs_file_aio_write to fault page */
                if (unlikely(copied == 0))
                        break;

                if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
                        offset += copied;
                } else {
                        pg++;
                        offset = 0;
                }
        }
        return total_copied;
}
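/*
 * A worked example of the loop above, assuming 4K pages and no partial
 * faults: a 5000 byte write at pos 6000 starts at offset 1904 into
 * prepared_pages[0], copies 2192 bytes to finish that page, then copies
 * the remaining 2808 bytes into prepared_pages[1] and stops with
 * offset == 2808.
 */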

/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;
        for (i = 0; i < num_pages; i++) {
                /* page checked is some magic around finding pages that
                 * have been modified without going through
                 * btrfs_set_page_dirty; clear it here
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                mark_page_accessed(pages[i]);
                page_cache_release(pages[i]);
        }
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
                      struct page **pages, size_t num_pages,
                      loff_t pos, size_t write_bytes,
                      struct extent_state **cached)
{
        int err = 0;
        int i;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);

        start_pos = pos & ~((u64)root->sectorsize - 1);
        num_bytes = (write_bytes + pos - start_pos +
                    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
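        /*
         * A worked example of the alignment math above, assuming a 4K
         * sectorsize: pos = 3000 and write_bytes = 2000 give start_pos = 0
         * and num_bytes = 8192, i.e. the dirty range is widened out to
         * whole sectors on both ends.
         */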

        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
                                        cached);
        if (err)
                return err;

        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
                SetPageUptodate(p);
                ClearPageChecked(p);
                set_page_dirty(p);
        }

        /*
         * we've only changed i_size in ram, and we haven't updated
         * the disk i_size.  There is no need to log the inode
         * at this time.
         */
        if (end_pos > isize)
                i_size_write(inode, end_pos);
        return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                             int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 len = end - start + 1;
        u64 gen;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
        while (1) {
                int no_splits = 0;

                if (!split)
                        split = alloc_extent_map();
                if (!split2)
                        split2 = alloc_extent_map();
                if (!split || !split2)
                        no_splits = 1;

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
                gen = em->generation;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (testend && em->start + em->len >= start + len) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        start = em->start + em->len;
                        if (testend)
                                len = start + len - (em->start + em->len);
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                remove_extent_mapping(em_tree, em);
                if (no_splits)
                        goto next;

                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;
                        split->orig_start = em->orig_start;
                        split->block_start = em->block_start;

                        if (compressed)
                                split->block_len = em->block_len;
                        else
                                split->block_len = split->len;
                        split->generation = gen;
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret); /* Logic error */
                        list_move(&split->list, &em_tree->modified_extents);
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        split->generation = gen;

                        if (compressed) {
                                split->block_len = em->block_len;
                                split->block_start = em->block_start;
                                split->orig_start = em->orig_start;
                        } else {
                                split->block_len = split->len;
                                split->block_start = em->block_start + diff;
                                split->orig_start = split->start;
                        }

                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret); /* Logic error */
                        list_move(&split->list, &em_tree->modified_extents);
                        free_extent_map(split);
                        split = NULL;
                }
next:
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root, struct inode *inode,
                         struct btrfs_path *path, u64 start, u64 end,
                         u64 *drop_end, int drop_cache)
{
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 ino = btrfs_ino(inode);
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
        u64 extent_offset = 0;
        u64 extent_end = 0;
        int del_nr = 0;
        int del_slot = 0;
        int extent_type;
        int recow;
        int ret;
        int modify_tree = -1;
        int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
        int found = 0;

        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);

        if (start >= BTRFS_I(inode)->disk_i_size)
                modify_tree = 0;

        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               search_start, modify_tree);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                        if (key.objectid == ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                ret = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        BUG_ON(del_nr > 0);
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                break;
                        if (ret > 0) {
                                ret = 0;
                                break;
                        }
                        leaf = path->nodes[0];
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid > ino ||
                    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
                                btrfs_file_extent_inline_len(leaf, fi);
                } else {
                        WARN_ON(1);
                        extent_end = search_start;
                }

                if (extent_end <= search_start) {
                        path->slots[0]++;
                        goto next_slot;
                }

                found = 1;
                search_start = max(key.offset, start);
                if (recow || !modify_tree) {
                        modify_tree = -1;
                        btrfs_release_path(path);
                        continue;
                }

                /*
                 *     | - range to drop - |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = start;
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
                                btrfs_release_path(path);
                                continue;
                        }
                        if (ret < 0)
                                break;

                        leaf = path->nodes[0];
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);

                        extent_offset += start - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - start);
                        btrfs_mark_buffer_dirty(leaf);

                        if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                new_key.objectid,
                                                start - extent_offset, 0);
                                BUG_ON(ret); /* -ENOMEM */
                        }
                        key.offset = start;
                }
                /*
                 *  | ---- range to drop ----- |
                 *      | -------- extent -------- |
                 */
                if (start <= key.offset && end < extent_end) {
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        extent_offset += end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, end - key.offset);
                        break;
                }

                search_start = extent_end;
                /*
                 *       | ---- range to drop ----- |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, extent_end - start);
                        if (end == extent_end)
                                break;

                        path->slots[0]++;
                        goto next_slot;
                }

                /*
                 *  | ---- range to drop ----- |
                 *    | ------ extent ------ |
                 */
                if (start <= key.offset && end >= extent_end) {
                        if (del_nr == 0) {
                                del_slot = path->slots[0];
                                del_nr = 1;
                        } else {
                                BUG_ON(del_slot + del_nr != path->slots[0]);
                                del_nr++;
                        }

                        if (update_refs &&
                            extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                extent_end = ALIGN(extent_end,
                                                   root->sectorsize);
                        } else if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_free_extent(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                key.objectid, key.offset -
                                                extent_offset, 0);
                                BUG_ON(ret); /* -ENOMEM */
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                        }

                        if (end == extent_end)
                                break;

                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
                                path->slots[0]++;
                                goto next_slot;
                        }

                        ret = btrfs_del_items(trans, root, path, del_slot,
                                              del_nr);
                        if (ret) {
                                btrfs_abort_transaction(trans, root, ret);
                                break;
                        }

                        del_nr = 0;
                        del_slot = 0;

                        btrfs_release_path(path);
                        continue;
                }

                BUG_ON(1);
        }

        if (!ret && del_nr > 0) {
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret)
                        btrfs_abort_transaction(trans, root, ret);
        }

        if (drop_end)
                *drop_end = found ? min(end, extent_end) : end;
        btrfs_release_path(path);
        return ret;
}

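/*
 * Convenience wrapper around __btrfs_drop_extents that allocates and
 * frees its own path and doesn't report back where the drop ended.
 */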
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct inode *inode, u64 start,
                       u64 end, int drop_cache)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
                                   drop_cache);
        btrfs_free_path(path);
        return ret;
}

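/*
 * Check whether the file extent item in 'slot' is a plain (uncompressed,
 * unencrypted) REG extent backed by the same disk extent described by
 * bytenr/orig_offset.  Nonzero *start / *end act as constraints that
 * must match the item's range; on success they are set to that range
 * and 1 is returned.
 */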
static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}

/*
 * Mark the extent in the range start - end as written.
 *
 * This changes the extent type from 'pre-allocated' to 'regular'.  If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct inode *inode, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        recow = 0;
        split = start;
        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        BUG_ON(btrfs_file_extent_type(leaf, fi) !=
               BTRFS_FILE_EXTENT_PREALLOC);
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        BUG_ON(key.offset > start || extent_end < end);

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));

        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

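        /*
         * Neither neighbour could absorb the written range, so split the
         * prealloc item instead: one pass of this loop splits at 'start'
         * (when start falls inside the item) and another splits at 'end'
         * (when end falls inside it), leaving the middle piece to be
         * flipped to a REG extent below.
         */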
        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
                        btrfs_release_path(path);
                        goto again;
                }
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }

                leaf = path->nodes[0];
                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                split - key.offset);

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);

                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - split);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
                                           ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */

                if (split == start) {
                        key.offset = start;
                } else {
                        BUG_ON(start != key.offset);
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }

        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        if (del_nr == 0) {
                fi = btrfs_item_ptr(leaf, path->slots[0],
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_mark_buffer_dirty(leaf);
        } else {
                fi = btrfs_item_ptr(leaf, del_slot - 1,
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }
        }
out:
        btrfs_free_path(path);
        return 0;
}

/*
 * on error we return an unlocked page and the error value;
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
                                 bool force_uptodate)
{
        int ret = 0;

        if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
            !PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
                if (ret)
                        return ret;
                lock_page(page);
                if (!PageUptodate(page)) {
                        unlock_page(page);
                        return -EIO;
                }
        }
        return 0;
}

/*
 * this gets pages into the page cache and locks them down; it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
                         struct page **pages, size_t num_pages,
                         loff_t pos, unsigned long first_index,
                         size_t write_bytes, bool force_uptodate)
{
        struct extent_state *cached_state = NULL;
        int i;
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
        struct inode *inode = fdentry(file)->d_inode;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int err = 0;
        int faili = 0;
        u64 start_pos;
        u64 last_pos;

        start_pos = pos & ~((u64)root->sectorsize - 1);
        last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = find_or_create_page(inode->i_mapping, index + i,
                                               mask | __GFP_WRITE);
                if (!pages[i]) {
                        faili = i - 1;
                        err = -ENOMEM;
                        goto fail;
                }

                if (i == 0)
                        err = prepare_uptodate_page(pages[i], pos,
                                                    force_uptodate);
                if (i == num_pages - 1)
                        err = prepare_uptodate_page(pages[i],
                                                    pos + write_bytes, false);
                if (err) {
                        page_cache_release(pages[i]);
                        faili = i - 1;
                        goto fail;
                }
                wait_on_page_writeback(pages[i]);
        }
        err = 0;
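        /*
         * If the write lands below i_size, ordered (in-flight) I/O may
         * overlap the locked range: if so, drop the pages and the extent
         * lock, wait for the ordered extent to finish and retry from
         * scratch.
         */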
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
                lock_extent_bits(&BTRFS_I(inode)->io_tree,
                                 start_pos, last_pos - 1, 0, &cached_state);
                ordered = btrfs_lookup_first_ordered_extent(inode,
                                                            last_pos - 1);
                if (ordered &&
                    ordered->file_offset + ordered->len > start_pos &&
                    ordered->file_offset < last_pos) {
                        btrfs_put_ordered_extent(ordered);
                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                             start_pos, last_pos - 1,
                                             &cached_state, GFP_NOFS);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
                                page_cache_release(pages[i]);
                        }
                        btrfs_wait_ordered_range(inode, start_pos,
                                                 last_pos - start_pos);
                        goto again;
                }
                if (ordered)
                        btrfs_put_ordered_extent(ordered);

                clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
                                  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
                                  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                                  0, 0, &cached_state, GFP_NOFS);
                unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                     start_pos, last_pos - 1, &cached_state,
                                     GFP_NOFS);
        }
        for (i = 0; i < num_pages; i++) {
                if (clear_page_dirty_for_io(pages[i]))
                        account_page_redirty(pages[i]);
                set_page_extent_mapped(pages[i]);
                WARN_ON(!PageLocked(pages[i]));
        }
        return 0;
fail:
        while (faili >= 0) {
                unlock_page(pages[faili]);
                page_cache_release(pages[faili]);
                faili--;
        }
        return err;
}

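/*
 * Core of the buffered write path: loop over the iov_iter and, for each
 * batch, reserve delalloc space, lock and prepare the target pages, copy
 * the user data in, dirty the pages and give back whatever part of the
 * reservation a short copy left unused.
 */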
static noinline ssize_t __btrfs_buffered_write(struct file *file,
                                               struct iov_iter *i,
                                               loff_t pos)
{
        struct inode *inode = fdentry(file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct page **pages = NULL;
        unsigned long first_index;
        size_t num_written = 0;
        int nrptrs;
        int ret = 0;
        bool force_page_uptodate = false;

        nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
                     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
                     (sizeof(struct page *)));
        nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
        nrptrs = max(nrptrs, 8);
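        /*
         * With 4K pages and 8-byte pointers (both arch dependent, so an
         * assumption) the PAGE_CACHE_SIZE / sizeof(struct page *) term
         * above caps a batch at 512 pages, i.e. 2MB per loop iteration.
         */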
        pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        first_index = pos >> PAGE_CACHE_SHIFT;

        while (iov_iter_count(i) > 0) {
                size_t offset = pos & (PAGE_CACHE_SIZE - 1);
                size_t write_bytes = min(iov_iter_count(i),
                                         nrptrs * (size_t)PAGE_CACHE_SIZE -
                                         offset);
                size_t num_pages = (write_bytes + offset +
                                    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
                size_t dirty_pages;
                size_t copied;

                WARN_ON(num_pages > nrptrs);

                /*
                 * Fault pages before locking them in prepare_pages
                 * to avoid recursive lock
                 */
                if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
                        ret = -EFAULT;
                        break;
                }

                ret = btrfs_delalloc_reserve_space(inode,
                                        num_pages << PAGE_CACHE_SHIFT);
                if (ret)
                        break;

                /*
                 * This is going to set up the pages array with the number of
                 * pages we want, so we don't really need to worry about the
                 * contents of pages from loop to loop
                 */
                ret = prepare_pages(root, file, pages, num_pages,
                                    pos, first_index, write_bytes,
                                    force_page_uptodate);
                if (ret) {
                        btrfs_delalloc_release_space(inode,
                                        num_pages << PAGE_CACHE_SHIFT);
                        break;
                }

                copied = btrfs_copy_from_user(pos, num_pages,
                                           write_bytes, pages, i);

                /*
                 * if we have trouble faulting in the pages, fall
                 * back to one page at a time
                 */
                if (copied < write_bytes)
                        nrptrs = 1;

                if (copied == 0) {
                        force_page_uptodate = true;
                        dirty_pages = 0;
                } else {
                        force_page_uptodate = false;
                        dirty_pages = (copied + offset +
                                       PAGE_CACHE_SIZE - 1) >>
                                       PAGE_CACHE_SHIFT;
                }

                /*
                 * If we had a short copy we need to release the excess delalloc
                 * bytes we reserved.  We need to increment outstanding_extents
                 * because btrfs_delalloc_release_space will decrement it, but
                 * we still have an outstanding extent for the chunk we actually
                 * managed to copy.
                 */
1322                 if (num_pages > dirty_pages) {
1323                         if (copied > 0) {
1324                                 spin_lock(&BTRFS_I(inode)->lock);
1325                                 BTRFS_I(inode)->outstanding_extents++;
1326                                 spin_unlock(&BTRFS_I(inode)->lock);
1327                         }
1328                         btrfs_delalloc_release_space(inode,
1329                                         (num_pages - dirty_pages) <<
1330                                         PAGE_CACHE_SHIFT);
1331                 }
1332
1333                 if (copied > 0) {
1334                         ret = btrfs_dirty_pages(root, inode, pages,
1335                                                 dirty_pages, pos, copied,
1336                                                 NULL);
1337                         if (ret) {
1338                                 btrfs_delalloc_release_space(inode,
1339                                         dirty_pages << PAGE_CACHE_SHIFT);
1340                                 btrfs_drop_pages(pages, num_pages);
1341                                 break;
1342                         }
1343                 }
1344
1345                 btrfs_drop_pages(pages, num_pages);
1346
1347                 cond_resched();
1348
1349                 balance_dirty_pages_ratelimited(inode->i_mapping);
1350                 if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
1351                         btrfs_btree_balance_dirty(root, 1);
1352
1353                 pos += copied;
1354                 num_written += copied;
1355         }
1356
1357         kfree(pages);
1358
1359         return num_written ? num_written : ret;
1360 }
1361
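/*
 * Worked example for the rounding above (illustrative, assuming 4KiB
 * pages, i.e. PAGE_CACHE_SHIFT == 12): copying 5000 bytes starting at
 * offset 100 within the first page gives
 *
 *   dirty_pages = (5000 + 100 + 4096 - 1) >> 12 = 2
 *
 * so two pages are dirtied even though barely more than one page of
 * data was copied, because the copy straddles a page boundary.
 */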
1362 static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1363                                     const struct iovec *iov,
1364                                     unsigned long nr_segs, loff_t pos,
1365                                     loff_t *ppos, size_t count, size_t ocount)
1366 {
1367         struct file *file = iocb->ki_filp;
1368         struct iov_iter i;
1369         ssize_t written;
1370         ssize_t written_buffered;
1371         loff_t endbyte;
1372         int err;
1373
1374         written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
1375                                             count, ocount);
1376
1377         if (written < 0 || written == count)
1378                 return written;
1379
1380         pos += written;
1381         count -= written;
1382         iov_iter_init(&i, iov, nr_segs, count, written);
1383         written_buffered = __btrfs_buffered_write(file, &i, pos);
1384         if (written_buffered < 0) {
1385                 err = written_buffered;
1386                 goto out;
1387         }
1388         endbyte = pos + written_buffered - 1;
1389         err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
1390         if (err)
1391                 goto out;
1392         written += written_buffered;
1393         *ppos = pos + written_buffered;
1394         invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
1395                                  endbyte >> PAGE_CACHE_SHIFT);
1396 out:
1397         return written ? written : err;
1398 }
1399
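/*
 * A sketch of the fallback above (illustrative): when an O_DIRECT write
 * completes only partially, the tail goes through the buffered path, is
 * flushed to disk, and the cached pages are invalidated so the outcome
 * matches a fully direct write:
 *
 *   written = generic_file_direct_write(...);        // short write
 *   written_buffered = __btrfs_buffered_write(...);  // write the tail
 *   filemap_write_and_wait_range(...);               // push to disk
 *   invalidate_mapping_pages(...);                   // drop the cache
 */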
1400 static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1401                                     const struct iovec *iov,
1402                                     unsigned long nr_segs, loff_t pos)
1403 {
1404         struct file *file = iocb->ki_filp;
1405         struct inode *inode = fdentry(file)->d_inode;
1406         struct btrfs_root *root = BTRFS_I(inode)->root;
1407         loff_t *ppos = &iocb->ki_pos;
1408         u64 start_pos;
1409         ssize_t num_written = 0;
1410         ssize_t err = 0;
1411         size_t count, ocount;
1412
1413         sb_start_write(inode->i_sb);
1414
1415         mutex_lock(&inode->i_mutex);
1416
1417         err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
1418         if (err) {
1419                 mutex_unlock(&inode->i_mutex);
1420                 goto out;
1421         }
1422         count = ocount;
1423
1424         current->backing_dev_info = inode->i_mapping->backing_dev_info;
1425         err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1426         if (err) {
1427                 mutex_unlock(&inode->i_mutex);
1428                 goto out;
1429         }
1430
1431         if (count == 0) {
1432                 mutex_unlock(&inode->i_mutex);
1433                 goto out;
1434         }
1435
1436         err = file_remove_suid(file);
1437         if (err) {
1438                 mutex_unlock(&inode->i_mutex);
1439                 goto out;
1440         }
1441
1442         /*
1443          * If BTRFS flips readonly due to some impossible error
1444          * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1445          * although we have opened a file as writable, we have
1446          * to stop this write operation to ensure FS consistency.
1447          */
1448         if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
1449                 mutex_unlock(&inode->i_mutex);
1450                 err = -EROFS;
1451                 goto out;
1452         }
1453
1454         err = file_update_time(file);
1455         if (err) {
1456                 mutex_unlock(&inode->i_mutex);
1457                 goto out;
1458         }
1459
1460         start_pos = round_down(pos, root->sectorsize);
1461         if (start_pos > i_size_read(inode)) {
1462                 err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
1463                 if (err) {
1464                         mutex_unlock(&inode->i_mutex);
1465                         goto out;
1466                 }
1467         }
1468
1469         if (unlikely(file->f_flags & O_DIRECT)) {
1470                 num_written = __btrfs_direct_write(iocb, iov, nr_segs,
1471                                                    pos, ppos, count, ocount);
1472         } else {
1473                 struct iov_iter i;
1474
1475                 iov_iter_init(&i, iov, nr_segs, count, num_written);
1476
1477                 num_written = __btrfs_buffered_write(file, &i, pos);
1478                 if (num_written > 0)
1479                         *ppos = pos + num_written;
1480         }
1481
1482         mutex_unlock(&inode->i_mutex);
1483
1484         /*
1485          * we want to make sure fsync finds this change
1486          * but we haven't joined a transaction running right now.
1487          *
1488          * Later on, someone is sure to update the inode and get the
1489          * real transid recorded.
1490          *
1491          * We set last_trans now to the fs_info generation + 1,
1492          * this will either be one more than the running transaction
1493          * or the generation used for the next transaction if there isn't
1494          * one running right now.
1495          */
1496         BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
1497         if (num_written > 0 || num_written == -EIOCBQUEUED) {
1498                 err = generic_write_sync(file, pos, num_written);
1499                 if (err < 0 && num_written > 0)
1500                         num_written = err;
1501         }
1502 out:
1503         sb_end_write(inode->i_sb);
1504         current->backing_dev_info = NULL;
1505         return num_written ? num_written : err;
1506 }
1507
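/*
 * Note on the generic_write_sync() call above: it only forces a sync
 * when the file was opened with O_SYNC/O_DSYNC or the inode is flagged
 * sync; an illustrative userspace open that takes the syncing path:
 *
 *   int fd = open("/mnt/btrfs/journal", O_WRONLY | O_APPEND | O_SYNC);
 */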
1508 int btrfs_release_file(struct inode *inode, struct file *filp)
1509 {
1510         /*
1511          * ordered_data_close is set by setattr when we are about to truncate
1512          * a file from a non-zero size to a zero size.  This tries to
1513          * flush down new bytes that may have been written if the
1514          * application were using truncate to replace a file in place.
1515          */
1516         if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
1517                                &BTRFS_I(inode)->runtime_flags)) {
1518                 btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
1519                 if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
1520                         filemap_flush(inode->i_mapping);
1521         }
1522         if (filp->private_data)
1523                 btrfs_ioctl_trans_end(filp);
1524         return 0;
1525 }
1526
1527 /*
1528  * fsync call for both files and directories.  This logs the inode into
1529  * the tree log instead of forcing full commits whenever possible.
1530  *
1531  * It needs to call filemap_fdatawait so that all ordered extent updates
1532  * in the metadata btree are up to date for copying to the log.
1533  *
1534  * It drops the inode mutex before doing the tree log commit.  This is an
1535  * important optimization for directories because holding the mutex prevents
1536  * new operations on the dir while we write to disk.
1537  */
1538 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1539 {
1540         struct dentry *dentry = file->f_path.dentry;
1541         struct inode *inode = dentry->d_inode;
1542         struct btrfs_root *root = BTRFS_I(inode)->root;
1543         int ret = 0;
1544         struct btrfs_trans_handle *trans;
1545
1546         trace_btrfs_sync_file(file, datasync);
1547
1548         /*
1549          * We write the dirty pages in the range and wait until they complete
1550          * outside of the ->i_mutex, so that multiple tasks can flush dirty
1551          * pages concurrently and performance improves.
1552          */
1553         ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
1554         if (ret)
1555                 return ret;
1556
1557         mutex_lock(&inode->i_mutex);
1558
1559         /*
1560          * We flush the dirty pages again so that no dirty pages in the
1561          * range are left behind.
1562          */
1563         atomic_inc(&root->log_batch);
1564         btrfs_wait_ordered_range(inode, start, end);
1565         atomic_inc(&root->log_batch);
1566
1567         /*
1568          * check the transaction that last modified this inode
1569          * and see if it's already been committed
1570          */
1571         if (!BTRFS_I(inode)->last_trans) {
1572                 mutex_unlock(&inode->i_mutex);
1573                 goto out;
1574         }
1575
1576         /*
1577          * if the last transaction that changed this file was before
1578          * the current transaction, we can bail out now without any
1579          * syncing
1580          */
1581         smp_mb();
1582         if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
1583             BTRFS_I(inode)->last_trans <=
1584             root->fs_info->last_trans_committed) {
1585                 BTRFS_I(inode)->last_trans = 0;
1586
1587                 /*
1588                  * We've had everything committed since the last time we were
1589                  * modified, so clear this flag in case it was set for whatever
1590                  * reason; it's no longer relevant.
1591                  */
1592                 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1593                           &BTRFS_I(inode)->runtime_flags);
1594                 mutex_unlock(&inode->i_mutex);
1595                 goto out;
1596         }
1597
1598         /*
1599          * ok we haven't committed the transaction yet, let's do a commit
1600          */
1601         if (file->private_data)
1602                 btrfs_ioctl_trans_end(file);
1603
1604         trans = btrfs_start_transaction(root, 0);
1605         if (IS_ERR(trans)) {
1606                 ret = PTR_ERR(trans);
1607                 mutex_unlock(&inode->i_mutex);
1608                 goto out;
1609         }
1610
1611         ret = btrfs_log_dentry_safe(trans, root, dentry);
1612         if (ret < 0) {
1613                 mutex_unlock(&inode->i_mutex);
1614                 goto out;
1615         }
1616
1617         /* we've logged all the items and now have a consistent
1618          * version of the file in the log.  It is possible that
1619          * someone will come in and modify the file, but that's
1620          * fine because the log is consistent on disk, and we
1621          * have references to all of the file's extents
1622          *
1623          * It is possible that someone will come in and log the
1624          * file again, but that will end up using the synchronization
1625          * inside btrfs_sync_log to keep things safe.
1626          */
1627         mutex_unlock(&inode->i_mutex);
1628
1629         if (ret != BTRFS_NO_LOG_SYNC) {
1630                 if (ret > 0) {
1631                         ret = btrfs_commit_transaction(trans, root);
1632                 } else {
1633                         ret = btrfs_sync_log(trans, root);
1634                         if (ret == 0)
1635                                 ret = btrfs_end_transaction(trans, root);
1636                         else
1637                                 ret = btrfs_commit_transaction(trans, root);
1638                 }
1639         } else {
1640                 ret = btrfs_end_transaction(trans, root);
1641         }
1642 out:
1643         return ret > 0 ? -EIO : ret;
1644 }
1645
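/*
 * Illustrative sequence for the fast path above: when the inode was
 * last changed in a transaction that has since committed (last_trans <=
 * last_trans_committed), fsync() bails out without starting a new
 * transaction or writing to the tree log:
 *
 *   write(fd, buf, len);
 *   ... the containing transaction commits ...
 *   fsync(fd);    // returns after the ordered-extent wait only
 */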
1646 static const struct vm_operations_struct btrfs_file_vm_ops = {
1647         .fault          = filemap_fault,
1648         .page_mkwrite   = btrfs_page_mkwrite,
1649         .remap_pages    = generic_file_remap_pages,
1650 };
1651
1652 static int btrfs_file_mmap(struct file  *filp, struct vm_area_struct *vma)
1653 {
1654         struct address_space *mapping = filp->f_mapping;
1655
1656         if (!mapping->a_ops->readpage)
1657                 return -ENOEXEC;
1658
1659         file_accessed(filp);
1660         vma->vm_ops = &btrfs_file_vm_ops;
1661
1662         return 0;
1663 }
1664
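/*
 * Illustrative userspace use of the mmap path above: reads fault in
 * through filemap_fault() and the first store to each page goes
 * through btrfs_page_mkwrite():
 *
 *   char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                  fd, 0);
 *   p[0] = 1;   // triggers ->page_mkwrite
 */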
1665 static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
1666                           int slot, u64 start, u64 end)
1667 {
1668         struct btrfs_file_extent_item *fi;
1669         struct btrfs_key key;
1670
1671         if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1672                 return 0;
1673
1674         btrfs_item_key_to_cpu(leaf, &key, slot);
1675         if (key.objectid != btrfs_ino(inode) ||
1676             key.type != BTRFS_EXTENT_DATA_KEY)
1677                 return 0;
1678
1679         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1680
1681         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
1682                 return 0;
1683
1684         if (btrfs_file_extent_disk_bytenr(leaf, fi))
1685                 return 0;
1686
1687         if (key.offset == end)
1688                 return 1;
1689         if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
1690                 return 1;
1691         return 0;
1692 }
1693
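/*
 * Worked example for hole_mergeable() (illustrative): with an existing
 * hole extent at key.offset == 8192 covering 4096 bytes, a new hole
 * [4096, 8192) merges because key.offset == end, and a new hole
 * [12288, 16384) merges because key.offset + num_bytes == start.
 */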
1694 static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
1695                       struct btrfs_path *path, u64 offset, u64 end)
1696 {
1697         struct btrfs_root *root = BTRFS_I(inode)->root;
1698         struct extent_buffer *leaf;
1699         struct btrfs_file_extent_item *fi;
1700         struct extent_map *hole_em;
1701         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1702         struct btrfs_key key;
1703         int ret;
1704
1705         key.objectid = btrfs_ino(inode);
1706         key.type = BTRFS_EXTENT_DATA_KEY;
1707         key.offset = offset;
1708
1709
1710         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1711         if (ret < 0)
1712                 return ret;
1713         BUG_ON(!ret);
1714
1715         leaf = path->nodes[0];
1716         if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
1717                 u64 num_bytes;
1718
1719                 path->slots[0]--;
1720                 fi = btrfs_item_ptr(leaf, path->slots[0],
1721                                     struct btrfs_file_extent_item);
1722                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
1723                         end - offset;
1724                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1725                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1726                 btrfs_set_file_extent_offset(leaf, fi, 0);
1727                 btrfs_mark_buffer_dirty(leaf);
1728                 goto out;
1729         }
1730
1731         if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
1732                 u64 num_bytes;
1733
1734                 path->slots[0]++;
1735                 key.offset = offset;
1736                 btrfs_set_item_key_safe(trans, root, path, &key);
1737                 fi = btrfs_item_ptr(leaf, path->slots[0],
1738                                     struct btrfs_file_extent_item);
1739                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
1740                         offset;
1741                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1742                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1743                 btrfs_set_file_extent_offset(leaf, fi, 0);
1744                 btrfs_mark_buffer_dirty(leaf);
1745                 goto out;
1746         }
1747         btrfs_release_path(path);
1748
1749         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
1750                                        0, 0, end - offset, 0, end - offset,
1751                                        0, 0, 0);
1752         if (ret)
1753                 return ret;
1754
1755 out:
1756         btrfs_release_path(path);
1757
1758         hole_em = alloc_extent_map();
1759         if (!hole_em) {
1760                 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
1761                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1762                         &BTRFS_I(inode)->runtime_flags);
1763         } else {
1764                 hole_em->start = offset;
1765                 hole_em->len = end - offset;
1766                 hole_em->orig_start = offset;
1767
1768                 hole_em->block_start = EXTENT_MAP_HOLE;
1769                 hole_em->block_len = 0;
1770                 hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
1771                 hole_em->compress_type = BTRFS_COMPRESS_NONE;
1772                 hole_em->generation = trans->transid;
1773
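                /*
                 * add_extent_mapping() returns -EEXIST if a racing
                 * lookup re-populated this range of the cache, so keep
                 * dropping the range and retrying until the hole
                 * extent map is inserted cleanly.
                 */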
1774                 do {
1775                         btrfs_drop_extent_cache(inode, offset, end - 1, 0);
1776                         write_lock(&em_tree->lock);
1777                         ret = add_extent_mapping(em_tree, hole_em);
1778                         if (!ret)
1779                                 list_move(&hole_em->list,
1780                                           &em_tree->modified_extents);
1781                         write_unlock(&em_tree->lock);
1782                 } while (ret == -EEXIST);
1783                 free_extent_map(hole_em);
1784                 if (ret)
1785                         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1786                                 &BTRFS_I(inode)->runtime_flags);
1787         }
1788
1789         return 0;
1790 }
1791
1792 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
1793 {
1794         struct btrfs_root *root = BTRFS_I(inode)->root;
1795         struct extent_state *cached_state = NULL;
1796         struct btrfs_path *path;
1797         struct btrfs_block_rsv *rsv;
1798         struct btrfs_trans_handle *trans;
1799         u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
1800         u64 lockstart = (offset + mask) & ~mask;
1801         u64 lockend = ((offset + len) & ~mask) - 1;
1802         u64 cur_offset = lockstart;
1803         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
1804         u64 drop_end;
1805         unsigned long nr;
1806         int ret = 0;
1807         int err = 0;
1808         bool same_page = (offset >> PAGE_CACHE_SHIFT) ==
1809                 ((offset + len) >> PAGE_CACHE_SHIFT);
1810
1811         btrfs_wait_ordered_range(inode, offset, len);
1812
1813         mutex_lock(&inode->i_mutex);
1814         if (offset >= inode->i_size) {
1815                 mutex_unlock(&inode->i_mutex);
1816                 return 0;
1817         }
1818
1819         /*
1820          * Only do this if we are in the same page and we aren't doing the
1821          * entire page.
1822          */
1823         if (same_page && len < PAGE_CACHE_SIZE) {
1824                 ret = btrfs_truncate_page(inode, offset, len, 0);
1825                 mutex_unlock(&inode->i_mutex);
1826                 return ret;
1827         }
1828
1829         /* zero the back part of the first page */
1830         ret = btrfs_truncate_page(inode, offset, 0, 0);
1831         if (ret) {
1832                 mutex_unlock(&inode->i_mutex);
1833                 return ret;
1834         }
1835
1836         /* zero the front end of the last page */
1837         ret = btrfs_truncate_page(inode, offset + len, 0, 1);
1838         if (ret) {
1839                 mutex_unlock(&inode->i_mutex);
1840                 return ret;
1841         }
1842
1843         if (lockend < lockstart) {
1844                 mutex_unlock(&inode->i_mutex);
1845                 return 0;
1846         }
1847
1848         while (1) {
1849                 struct btrfs_ordered_extent *ordered;
1850
1851                 truncate_pagecache_range(inode, lockstart, lockend);
1852
1853                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
1854                                  0, &cached_state);
1855                 ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
1856
1857                 /*
1858                  * We need to make sure we have no ordered extents in this range
1859                  * and that nobody raced in and read a page in this range; if
1860                  * either happened we need to try again.
1861                  */
1862                 if ((!ordered ||
1863                     (ordered->file_offset + ordered->len < lockstart ||
1864                      ordered->file_offset > lockend)) &&
1865                      !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
1866                                      lockend, EXTENT_UPTODATE, 0,
1867                                      cached_state)) {
1868                         if (ordered)
1869                                 btrfs_put_ordered_extent(ordered);
1870                         break;
1871                 }
1872                 if (ordered)
1873                         btrfs_put_ordered_extent(ordered);
1874                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
1875                                      lockend, &cached_state, GFP_NOFS);
1876                 btrfs_wait_ordered_range(inode, lockstart,
1877                                          lockend - lockstart + 1);
1878         }
1879
1880         path = btrfs_alloc_path();
1881         if (!path) {
1882                 ret = -ENOMEM;
1883                 goto out;
1884         }
1885
1886         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
1887         if (!rsv) {
1888                 ret = -ENOMEM;
1889                 goto out_free;
1890         }
1891         rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
1892         rsv->failfast = 1;
1893
1894         /*
1895          * 1 - update the inode
1896          * 1 - removing the extents in the range
1897          * 1 - adding the hole extent
1898          */
1899         trans = btrfs_start_transaction(root, 3);
1900         if (IS_ERR(trans)) {
1901                 err = PTR_ERR(trans);
1902                 goto out_free;
1903         }
1904
1905         ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
1906                                       min_size);
1907         BUG_ON(ret);
1908         trans->block_rsv = rsv;
1909
1910         while (cur_offset < lockend) {
1911                 ret = __btrfs_drop_extents(trans, root, inode, path,
1912                                            cur_offset, lockend + 1,
1913                                            &drop_end, 1);
1914                 if (ret != -ENOSPC)
1915                         break;
1916
1917                 trans->block_rsv = &root->fs_info->trans_block_rsv;
1918
1919                 ret = fill_holes(trans, inode, path, cur_offset, drop_end);
1920                 if (ret) {
1921                         err = ret;
1922                         break;
1923                 }
1924
1925                 cur_offset = drop_end;
1926
1927                 ret = btrfs_update_inode(trans, root, inode);
1928                 if (ret) {
1929                         err = ret;
1930                         break;
1931                 }
1932
1933                 nr = trans->blocks_used;
1934                 btrfs_end_transaction(trans, root);
1935                 btrfs_btree_balance_dirty(root, nr);
1936
1937                 trans = btrfs_start_transaction(root, 3);
1938                 if (IS_ERR(trans)) {
1939                         ret = PTR_ERR(trans);
1940                         trans = NULL;
1941                         break;
1942                 }
1943
1944                 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
1945                                               rsv, min_size);
1946                 BUG_ON(ret);    /* shouldn't happen */
1947                 trans->block_rsv = rsv;
1948         }
1949
1950         if (ret) {
1951                 err = ret;
1952                 goto out_trans;
1953         }
1954
1955         trans->block_rsv = &root->fs_info->trans_block_rsv;
1956         ret = fill_holes(trans, inode, path, cur_offset, drop_end);
1957         if (ret) {
1958                 err = ret;
1959                 goto out_trans;
1960         }
1961
1962 out_trans:
1963         if (!trans)
1964                 goto out_free;
1965
1966         trans->block_rsv = &root->fs_info->trans_block_rsv;
1967         ret = btrfs_update_inode(trans, root, inode);
1968         nr = trans->blocks_used;
1969         btrfs_end_transaction(trans, root);
1970         btrfs_btree_balance_dirty(root, nr);
1971 out_free:
1972         btrfs_free_path(path);
1973         btrfs_free_block_rsv(root, rsv);
1974 out:
1975         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
1976                              &cached_state, GFP_NOFS);
1977         mutex_unlock(&inode->i_mutex);
1978         if (ret && !err)
1979                 err = ret;
1980         return err;
1981 }
1982
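/*
 * Illustrative userspace entry point for the hole punching above; the
 * VFS insists that FALLOC_FL_PUNCH_HOLE be paired with
 * FALLOC_FL_KEEP_SIZE before btrfs ever sees the call:
 *
 *   fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *             1 << 20, 1 << 20);   // drop 1MiB at offset 1MiB
 */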
1983 static long btrfs_fallocate(struct file *file, int mode,
1984                             loff_t offset, loff_t len)
1985 {
1986         struct inode *inode = file->f_path.dentry->d_inode;
1987         struct extent_state *cached_state = NULL;
1988         u64 cur_offset;
1989         u64 last_byte;
1990         u64 alloc_start;
1991         u64 alloc_end;
1992         u64 alloc_hint = 0;
1993         u64 locked_end;
1994         u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
1995         struct extent_map *em;
1996         int ret;
1997
1998         alloc_start = offset & ~mask;
1999         alloc_end =  (offset + len + mask) & ~mask;
2000
2001         /* Make sure we aren't being given some crap mode */
2002         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2003                 return -EOPNOTSUPP;
2004
2005         if (mode & FALLOC_FL_PUNCH_HOLE)
2006                 return btrfs_punch_hole(inode, offset, len);
2007
2008         /*
2009          * Make sure we have enough space before we do the
2010          * allocation.
2011          */
2012         ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start + 1);
2013         if (ret)
2014                 return ret;
2015
2016         /*
2017          * wait for ordered IO before we have any locks.  We'll loop again
2018          * below with the locks held.
2019          */
2020         btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
2021
2022         mutex_lock(&inode->i_mutex);
2023         ret = inode_newsize_ok(inode, alloc_end);
2024         if (ret)
2025                 goto out;
2026
2027         if (alloc_start > inode->i_size) {
2028                 ret = btrfs_cont_expand(inode, i_size_read(inode),
2029                                         alloc_start);
2030                 if (ret)
2031                         goto out;
2032         }
2033
2034         locked_end = alloc_end - 1;
2035         while (1) {
2036                 struct btrfs_ordered_extent *ordered;
2037
2038                 /* the extent lock is ordered inside the running
2039                  * transaction
2040                  */
2041                 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
2042                                  locked_end, 0, &cached_state);
2043                 ordered = btrfs_lookup_first_ordered_extent(inode,
2044                                                             alloc_end - 1);
2045                 if (ordered &&
2046                     ordered->file_offset + ordered->len > alloc_start &&
2047                     ordered->file_offset < alloc_end) {
2048                         btrfs_put_ordered_extent(ordered);
2049                         unlock_extent_cached(&BTRFS_I(inode)->io_tree,
2050                                              alloc_start, locked_end,
2051                                              &cached_state, GFP_NOFS);
2052                         /*
2053                          * we can't wait on the range with the transaction
2054                          * running or with the extent lock held
2055                          */
2056                         btrfs_wait_ordered_range(inode, alloc_start,
2057                                                  alloc_end - alloc_start);
2058                 } else {
2059                         if (ordered)
2060                                 btrfs_put_ordered_extent(ordered);
2061                         break;
2062                 }
2063         }
2064
2065         cur_offset = alloc_start;
2066         while (1) {
2067                 u64 actual_end;
2068
2069                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2070                                       alloc_end - cur_offset, 0);
2071                 if (IS_ERR_OR_NULL(em)) {
2072                         if (!em)
2073                                 ret = -ENOMEM;
2074                         else
2075                                 ret = PTR_ERR(em);
2076                         break;
2077                 }
2078                 last_byte = min(extent_map_end(em), alloc_end);
2079                 actual_end = min_t(u64, extent_map_end(em), offset + len);
2080                 last_byte = (last_byte + mask) & ~mask;
2081
2082                 if (em->block_start == EXTENT_MAP_HOLE ||
2083                     (cur_offset >= inode->i_size &&
2084                      !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
2085                         ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
2086                                                         last_byte - cur_offset,
2087                                                         1 << inode->i_blkbits,
2088                                                         offset + len,
2089                                                         &alloc_hint);
2090
2091                         if (ret < 0) {
2092                                 free_extent_map(em);
2093                                 break;
2094                         }
2095                 } else if (actual_end > inode->i_size &&
2096                            !(mode & FALLOC_FL_KEEP_SIZE)) {
2097                         /*
2098                          * We didn't need to allocate any more space, but we
2099                          * still extended the size of the file so we need to
2100                          * update i_size.
2101                          */
2102                         inode->i_ctime = CURRENT_TIME;
2103                         i_size_write(inode, actual_end);
2104                         btrfs_ordered_update_i_size(inode, actual_end, NULL);
2105                 }
2106                 free_extent_map(em);
2107
2108                 cur_offset = last_byte;
2109                 if (cur_offset >= alloc_end) {
2110                         ret = 0;
2111                         break;
2112                 }
2113         }
2114         unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
2115                              &cached_state, GFP_NOFS);
2116 out:
2117         mutex_unlock(&inode->i_mutex);
2118         /* Let go of our reservation. */
2119         btrfs_free_reserved_data_space(inode, alloc_end - alloc_start + 1);
2120         return ret;
2121 }
2122
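/*
 * Illustrative preallocation calls that reach the loop above:
 *
 *   fallocate(fd, 0, 0, 16 << 20);                   // extends i_size
 *   fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20); // i_size untouched
 *
 * Both reserve 16MiB of extents; only the first updates i_size when it
 * grows the file.
 */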
2123 static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
2124 {
2125         struct btrfs_root *root = BTRFS_I(inode)->root;
2126         struct extent_map *em;
2127         struct extent_state *cached_state = NULL;
2128         u64 lockstart = *offset;
2129         u64 lockend = i_size_read(inode);
2130         u64 start = *offset;
2131         u64 orig_start = *offset;
2132         u64 len = i_size_read(inode);
2133         u64 last_end = 0;
2134         int ret = 0;
2135
2136         lockend = max_t(u64, root->sectorsize, lockend);
2137         if (lockend <= lockstart)
2138                 lockend = lockstart + root->sectorsize;
2139
2140         len = lockend - lockstart + 1;
2141
2142         len = max_t(u64, len, root->sectorsize);
2143         if (inode->i_size == 0)
2144                 return -ENXIO;
2145
2146         lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
2147                          &cached_state);
2148
2149         /*
2150          * Delalloc is such a pain.  If we have a hole and we have pending
2151          * delalloc for a portion of the hole we will get back a hole that
2152          * exists for the entire range since it hasn't been actually written
2153          * yet.  So to take care of this case we need to look for an extent just
2154          * before the position we want in case there is outstanding delalloc
2155          * going on here.
2156          */
2157         if (whence == SEEK_HOLE && start != 0) {
2158                 if (start <= root->sectorsize)
2159                         em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
2160                                                      root->sectorsize, 0);
2161                 else
2162                         em = btrfs_get_extent_fiemap(inode, NULL, 0,
2163                                                      start - root->sectorsize,
2164                                                      root->sectorsize, 0);
2165                 if (IS_ERR(em)) {
2166                         ret = PTR_ERR(em);
2167                         goto out;
2168                 }
2169                 last_end = em->start + em->len;
2170                 if (em->block_start == EXTENT_MAP_DELALLOC)
2171                         last_end = min_t(u64, last_end, inode->i_size);
2172                 free_extent_map(em);
2173         }
2174
2175         while (1) {
2176                 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
2177                 if (IS_ERR(em)) {
2178                         ret = PTR_ERR(em);
2179                         break;
2180                 }
2181
2182                 if (em->block_start == EXTENT_MAP_HOLE) {
2183                         if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2184                                 if (last_end <= orig_start) {
2185                                         free_extent_map(em);
2186                                         ret = -ENXIO;
2187                                         break;
2188                                 }
2189                         }
2190
2191                         if (whence == SEEK_HOLE) {
2192                                 *offset = start;
2193                                 free_extent_map(em);
2194                                 break;
2195                         }
2196                 } else {
2197                         if (whence == SEEK_DATA) {
2198                                 if (em->block_start == EXTENT_MAP_DELALLOC) {
2199                                         if (start >= inode->i_size) {
2200                                                 free_extent_map(em);
2201                                                 ret = -ENXIO;
2202                                                 break;
2203                                         }
2204                                 }
2205
2206                                 *offset = start;
2207                                 free_extent_map(em);
2208                                 break;
2209                         }
2210                 }
2211
2212                 start = em->start + em->len;
2213                 last_end = em->start + em->len;
2214
2215                 if (em->block_start == EXTENT_MAP_DELALLOC)
2216                         last_end = min_t(u64, last_end, inode->i_size);
2217
2218                 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2219                         free_extent_map(em);
2220                         ret = -ENXIO;
2221                         break;
2222                 }
2223                 free_extent_map(em);
2224                 cond_resched();
2225         }
2226         if (!ret)
2227                 *offset = min(*offset, inode->i_size);
2228 out:
2229         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2230                              &cached_state, GFP_NOFS);
2231         return ret;
2232 }
2233
2234 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
2235 {
2236         struct inode *inode = file->f_mapping->host;
2237         int ret;
2238
2239         mutex_lock(&inode->i_mutex);
2240         switch (whence) {
2241         case SEEK_END:
2242         case SEEK_CUR:
2243                 offset = generic_file_llseek(file, offset, whence);
2244                 goto out;
2245         case SEEK_DATA:
2246         case SEEK_HOLE:
2247                 if (offset >= i_size_read(inode)) {
2248                         mutex_unlock(&inode->i_mutex);
2249                         return -ENXIO;
2250                 }
2251
2252                 ret = find_desired_extent(inode, &offset, whence);
2253                 if (ret) {
2254                         mutex_unlock(&inode->i_mutex);
2255                         return ret;
2256                 }
2257         }
2258
2259         if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
2260                 offset = -EINVAL;
2261                 goto out;
2262         }
2263         if (offset > inode->i_sb->s_maxbytes) {
2264                 offset = -EINVAL;
2265                 goto out;
2266         }
2267
2268         /* Special lock needed here? */
2269         if (offset != file->f_pos) {
2270                 file->f_pos = offset;
2271                 file->f_version = 0;
2272         }
2273 out:
2274         mutex_unlock(&inode->i_mutex);
2275         return offset;
2276 }
2277
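/*
 * Illustrative userspace walk over a sparse file using the
 * SEEK_DATA/SEEK_HOLE support above:
 *
 *   off_t data = lseek(fd, 0, SEEK_DATA);     // first populated byte
 *   off_t hole = lseek(fd, data, SEEK_HOLE);  // end of that region
 *   // past EOF either call fails with errno == ENXIO
 */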
2278 const struct file_operations btrfs_file_operations = {
2279         .llseek         = btrfs_file_llseek,
2280         .read           = do_sync_read,
2281         .write          = do_sync_write,
2282         .aio_read       = generic_file_aio_read,
2283         .splice_read    = generic_file_splice_read,
2284         .aio_write      = btrfs_file_aio_write,
2285         .mmap           = btrfs_file_mmap,
2286         .open           = generic_file_open,
2287         .release        = btrfs_release_file,
2288         .fsync          = btrfs_sync_file,
2289         .fallocate      = btrfs_fallocate,
2290         .unlocked_ioctl = btrfs_ioctl,
2291 #ifdef CONFIG_COMPAT
2292         .compat_ioctl   = btrfs_ioctl,
2293 #endif
2294 };