/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
#include "volumes.h"

/*
 * When auto defrag is enabled, we queue up these defrag structs to
 * remember which inodes need defragging passes.
 */
struct inode_defrag {
        struct rb_node rb_node;
        /* objectid */
        u64 ino;
        /*
         * transid where the defrag was added, we search for
         * extents newer than this
         */
        u64 transid;

        /* root objectid */
        u64 root;

        /* last offset we were able to defrag */
        u64 last_offset;

        /* if we've wrapped around back to zero once already */
        int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
                                  struct inode_defrag *defrag2)
{
        if (defrag1->root > defrag2->root)
                return 1;
        else if (defrag1->root < defrag2->root)
                return -1;
        else if (defrag1->ino > defrag2->ino)
                return 1;
        else if (defrag1->ino < defrag2->ino)
                return -1;
        else
                return 0;
}
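
/*
 * Ordering example: records compare by root first, then by inode, so
 * (root 5, ino 258) sorts before (root 5, ino 300), and every record
 * for root 5 sorts before any record for root 6.
 */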

/* insert a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static void __btrfs_add_inode_defrag(struct inode *inode,
                                     struct inode_defrag *defrag)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *entry;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        int ret;

        p = &root->fs_info->defrag_inodes.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(defrag, entry);
                if (ret < 0)
                        p = &parent->rb_left;
                else if (ret > 0)
                        p = &parent->rb_right;
                else {
                        /* if we're reinserting an entry for
                         * an old defrag run, make sure to
                         * lower the transid of our existing record
                         */
                        if (defrag->transid < entry->transid)
                                entry->transid = defrag->transid;
                        if (defrag->last_offset > entry->last_offset)
                                entry->last_offset = defrag->last_offset;
                        goto exists;
                }
        }
        set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
        return;

exists:
        kfree(defrag);
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
                           struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *defrag;
        u64 transid;

        if (!btrfs_test_opt(root, AUTO_DEFRAG))
                return 0;

        if (btrfs_fs_closing(root->fs_info))
                return 0;

        if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                return 0;

        if (trans)
                transid = trans->transid;
        else
                transid = BTRFS_I(inode)->root->last_trans;

        defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
        if (!defrag)
                return -ENOMEM;

        defrag->ino = btrfs_ino(inode);
        defrag->transid = transid;
        defrag->root = root->root_key.objectid;

        spin_lock(&root->fs_info->defrag_inodes_lock);
        if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                __btrfs_add_inode_defrag(inode, defrag);
        else
                kfree(defrag);
        spin_unlock(&root->fs_info->defrag_inodes_lock);
        return 0;
}

/*
 * must be called with the defrag_inodes lock held
 */
struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
                                             u64 root, u64 ino,
                                             struct rb_node **next)
{
        struct inode_defrag *entry = NULL;
        struct inode_defrag tmp;
        struct rb_node *p;
        struct rb_node *parent = NULL;
        int ret;

        tmp.ino = ino;
        tmp.root = root;

        p = info->defrag_inodes.rb_node;
        while (p) {
                parent = p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(&tmp, entry);
                if (ret < 0)
                        p = parent->rb_left;
                else if (ret > 0)
                        p = parent->rb_right;
                else
                        return entry;
        }

        if (next) {
                while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
                        parent = rb_next(parent);
                        entry = rb_entry(parent, struct inode_defrag, rb_node);
                }
                *next = parent;
        }
        return NULL;
}
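
/*
 * Illustrative use of the 'next' out parameter above: if the tree holds
 * records for inodes 10 and 20 and we search for ino 15, no exact match
 * exists, so *next is set to the record for inode 20 -- the first record
 * that sorts after the search key.  This is what lets
 * btrfs_run_defrag_inodes() resume its scan from (root, ino + 1).
 */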

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        struct btrfs_root *inode_root;
        struct inode *inode;
        struct rb_node *n;
        struct btrfs_key key;
        struct btrfs_ioctl_defrag_range_args range;
        u64 first_ino = 0;
        u64 root_objectid = 0;
        int num_defrag;
        int defrag_batch = 1024;

        memset(&range, 0, sizeof(range));
        range.len = (u64)-1;

        atomic_inc(&fs_info->defrag_running);
        spin_lock(&fs_info->defrag_inodes_lock);
        while (1) {
                n = NULL;

                /* find an inode to defrag */
                defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
                                                 first_ino, &n);
                if (!defrag) {
                        if (n) {
                                defrag = rb_entry(n, struct inode_defrag,
                                                  rb_node);
                        } else if (root_objectid || first_ino) {
                                root_objectid = 0;
                                first_ino = 0;
                                continue;
                        } else {
                                break;
                        }
                }

                /* remove it from the rbtree */
                first_ino = defrag->ino + 1;
                root_objectid = defrag->root;
                rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);

                if (btrfs_fs_closing(fs_info))
                        goto next_free;

                spin_unlock(&fs_info->defrag_inodes_lock);

                /* get the inode */
                key.objectid = defrag->root;
                btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
                key.offset = (u64)-1;
                inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
                if (IS_ERR(inode_root))
                        goto next;

                key.objectid = defrag->ino;
                btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
                key.offset = 0;

                inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
                if (IS_ERR(inode))
                        goto next;

                /* do a chunk of defrag */
                clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
                range.start = defrag->last_offset;
                num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
                                               defrag_batch);
                /*
                 * if we filled the whole defrag batch, there
                 * must be more work to do.  Queue this defrag
                 * again
                 */
                if (num_defrag == defrag_batch) {
                        defrag->last_offset = range.start;
                        __btrfs_add_inode_defrag(inode, defrag);
                        /*
                         * we don't want to kfree defrag, we added it back to
                         * the rbtree
                         */
                        defrag = NULL;
                } else if (defrag->last_offset && !defrag->cycled) {
                        /*
                         * we didn't fill our defrag batch, but
                         * we didn't start at zero.  Make sure we loop
                         * around to the start of the file.
                         */
                        defrag->last_offset = 0;
                        defrag->cycled = 1;
                        __btrfs_add_inode_defrag(inode, defrag);
                        defrag = NULL;
                }

                iput(inode);
next:
                spin_lock(&fs_info->defrag_inodes_lock);
next_free:
                kfree(defrag);
        }
        spin_unlock(&fs_info->defrag_inodes_lock);

        atomic_dec(&fs_info->defrag_running);

        /*
         * during unmount, we use the transaction_wait queue to
         * wait for the defragger to stop
         */
        wake_up(&fs_info->transaction_wait);
        return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
                                         size_t write_bytes,
                                         struct page **prepared_pages,
                                         struct iov_iter *i)
{
        size_t copied = 0;
        size_t total_copied = 0;
        int pg = 0;
        int offset = pos & (PAGE_CACHE_SIZE - 1);
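        /*
         * Worked example, assuming 4K pages: a write at pos = 5000
         * starts at byte offset 5000 & 4095 = 904 within
         * prepared_pages[0], so the first copy below is capped at
         * 4096 - 904 = 3192 bytes before moving to the next page.
         */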

        while (write_bytes > 0) {
                size_t count = min_t(size_t,
                                     PAGE_CACHE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
                 *
                 * Disable pagefault to avoid recursive lock since
                 * the pages are already locked
                 */
                pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
                pagefault_enable();

                /* Flush processor's dcache for this page */
                flush_dcache_page(page);

                /*
                 * if we get a partial write, we can end up with
                 * partially up to date pages.  These add
                 * a lot of complexity, so make sure they don't
                 * happen by forcing this copy to be retried.
                 *
                 * The rest of the btrfs_file_write code will fall
                 * back to page at a time copies after we return 0.
                 */
                if (!PageUptodate(page) && copied < count)
                        copied = 0;

                iov_iter_advance(i, copied);
                write_bytes -= copied;
                total_copied += copied;

                /* Return to btrfs_file_aio_write to fault page */
                if (unlikely(copied == 0))
                        break;

                if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
                        offset += copied;
                } else {
                        pg++;
                        offset = 0;
                }
        }
        return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;
        for (i = 0; i < num_pages; i++) {
                /* PageChecked is some magic used to find pages that were
                 * modified without going through btrfs_set_page_dirty;
                 * clear it here
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                mark_page_accessed(pages[i]);
                page_cache_release(pages[i]);
        }
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
                      struct page **pages, size_t num_pages,
                      loff_t pos, size_t write_bytes,
                      struct extent_state **cached)
{
        int err = 0;
        int i;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);

        start_pos = pos & ~((u64)root->sectorsize - 1);
        num_bytes = (write_bytes + pos - start_pos +
                    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
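        /*
         * Worked example, assuming a 4K sectorsize: for pos = 5000 and
         * write_bytes = 100, start_pos rounds down to 4096 and
         * num_bytes = (100 + 5000 - 4096 + 4095) & ~4095 = 4096, so
         * delalloc is marked on exactly the one block that was touched.
         */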

        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
                                        cached);
        if (err)
                return err;

        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
                SetPageUptodate(p);
                ClearPageChecked(p);
                set_page_dirty(p);
        }

        /*
         * we've only changed i_size in ram, and we haven't updated
         * the disk i_size.  There is no need to log the inode
         * at this time.
         */
        if (end_pos > isize)
                i_size_write(inode, end_pos);
        return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
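/*
 * For example, dropping the range [4k, 8k) from a single cached mapping
 * that covers [0k, 12k) leaves two mappings behind:
 *
 *   before:  [0k ........................ 12k)
 *   drop:            [4k ....... 8k)
 *   after:   [0k .. 4k)              [8k .. 12k)
 *
 * 'split' and 'split2' below are preallocated to hold those two pieces.
 */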
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                             int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 len = end - start + 1;
        u64 gen;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
        while (1) {
                int no_splits = 0;

                if (!split)
                        split = alloc_extent_map();
                if (!split2)
                        split2 = alloc_extent_map();
                if (!split || !split2)
                        no_splits = 1;

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
                gen = em->generation;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (testend && em->start + em->len >= start + len) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        start = em->start + em->len;
                        if (testend)
                                len = start + len - (em->start + em->len);
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                remove_extent_mapping(em_tree, em);
                if (no_splits)
                        goto next;

                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;
                        split->orig_start = em->orig_start;
                        split->block_start = em->block_start;

                        if (compressed)
                                split->block_len = em->block_len;
                        else
                                split->block_len = split->len;
                        split->generation = gen;
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret); /* Logic error */
                        list_move(&split->list, &em_tree->modified_extents);
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        split->generation = gen;

                        if (compressed) {
                                split->block_len = em->block_len;
                                split->block_start = em->block_start;
                                split->orig_start = em->orig_start;
                        } else {
                                split->block_len = split->len;
                                split->block_start = em->block_start + diff;
                                split->orig_start = split->start;
                        }

                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret); /* Logic error */
                        list_move(&split->list, &em_tree->modified_extents);
                        free_extent_map(split);
                        split = NULL;
                }
next:
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  If drop_end is not NULL, it is set to
 * the end of the range that was actually processed.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root, struct inode *inode,
                         struct btrfs_path *path, u64 start, u64 end,
                         u64 *drop_end, int drop_cache)
{
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 ino = btrfs_ino(inode);
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
        u64 extent_offset = 0;
        u64 extent_end = 0;
        int del_nr = 0;
        int del_slot = 0;
        int extent_type;
        int recow;
        int ret;
        int modify_tree = -1;
        int update_refs = (root->ref_cows || root == root->fs_info->tree_root);

        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);

        if (start >= BTRFS_I(inode)->disk_i_size)
                modify_tree = 0;

        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               search_start, modify_tree);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                        if (key.objectid == ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                ret = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        BUG_ON(del_nr > 0);
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                break;
                        if (ret > 0) {
                                ret = 0;
                                break;
                        }
                        leaf = path->nodes[0];
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid > ino ||
                    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
                                btrfs_file_extent_inline_len(leaf, fi);
                } else {
                        WARN_ON(1);
                        extent_end = search_start;
                }

                if (extent_end <= search_start) {
                        path->slots[0]++;
                        goto next_slot;
                }

                search_start = max(key.offset, start);
                if (recow || !modify_tree) {
                        modify_tree = -1;
                        btrfs_release_path(path);
                        continue;
                }

                /*
                 *     | - range to drop - |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = start;
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
                                btrfs_release_path(path);
                                continue;
                        }
                        if (ret < 0)
                                break;

                        leaf = path->nodes[0];
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);

                        extent_offset += start - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - start);
                        btrfs_mark_buffer_dirty(leaf);

                        if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                new_key.objectid,
                                                start - extent_offset, 0);
                                BUG_ON(ret); /* -ENOMEM */
                        }
                        key.offset = start;
                }
                /*
                 *  | ---- range to drop ----- |
                 *      | -------- extent -------- |
                 */
                if (start <= key.offset && end < extent_end) {
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        extent_offset += end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, end - key.offset);
                        break;
                }

                search_start = extent_end;
                /*
                 *       | ---- range to drop ----- |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, extent_end - start);
                        if (end == extent_end)
                                break;

                        path->slots[0]++;
                        goto next_slot;
                }

                /*
                 *  | ---- range to drop ----- |
                 *    | ------ extent ------ |
                 */
                if (start <= key.offset && end >= extent_end) {
                        if (del_nr == 0) {
                                del_slot = path->slots[0];
                                del_nr = 1;
                        } else {
                                BUG_ON(del_slot + del_nr != path->slots[0]);
                                del_nr++;
                        }

                        if (update_refs &&
                            extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                extent_end = ALIGN(extent_end,
                                                   root->sectorsize);
                        } else if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_free_extent(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                key.objectid, key.offset -
                                                extent_offset, 0);
                                BUG_ON(ret); /* -ENOMEM */
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                        }

                        if (end == extent_end)
                                break;

                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
                                path->slots[0]++;
                                goto next_slot;
                        }

                        ret = btrfs_del_items(trans, root, path, del_slot,
                                              del_nr);
                        if (ret) {
                                btrfs_abort_transaction(trans, root, ret);
                                break;
                        }

                        del_nr = 0;
                        del_slot = 0;

                        btrfs_release_path(path);
                        continue;
                }

                BUG_ON(1);
        }

        if (!ret && del_nr > 0) {
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret)
                        btrfs_abort_transaction(trans, root, ret);
        }

        if (drop_end)
                *drop_end = min(end, extent_end);
        btrfs_release_path(path);
        return ret;
}

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct inode *inode, u64 start,
                       u64 end, int drop_cache)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
                                   drop_cache);
        btrfs_free_path(path);
        return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}
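
/*
 * Note that *start and *end above double as input constraints and
 * outputs: a non-zero *end asks "does the neighbouring extent end
 * exactly at *end?", so the callers below pass other_end = start to
 * check for a physically contiguous REG extent immediately preceding
 * the range, and on success the actual key offset and extent end are
 * written back through the pointers.
 */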

/*
 * Mark the extent in the range start - end as written.
 *
 * This changes the extent type from 'pre-allocated' to 'regular'.  If
 * only part of the extent is marked as written, the extent will be
 * split into two or three.
 */
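/*
 * Illustrative three-way split, when [start, end] is strictly inside a
 * preallocated extent:
 *
 *   before:  |----------- prealloc -----------|
 *   written:         |start ..... end|
 *   after:   | pre  |     regular     |  pre  |
 *
 * The resulting pieces are re-merged with their neighbours afterwards
 * where extent_mergeable() allows it.
 */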
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct inode *inode, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        recow = 0;
        split = start;
        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        BUG_ON(btrfs_file_extent_type(leaf, fi) !=
               BTRFS_FILE_EXTENT_PREALLOC);
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        BUG_ON(key.offset > start || extent_end < end);

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));

        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
                        btrfs_release_path(path);
                        goto again;
                }
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }

                leaf = path->nodes[0];
                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                split - key.offset);

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);

                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - split);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
                                           ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */

                if (split == start) {
                        key.offset = start;
                } else {
                        BUG_ON(start != key.offset);
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }

        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        if (del_nr == 0) {
                fi = btrfs_item_ptr(leaf, path->slots[0],
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_mark_buffer_dirty(leaf);
        } else {
                fi = btrfs_item_ptr(leaf, del_slot - 1,
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }
        }
out:
        btrfs_free_path(path);
        return 0;
}

/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
                                 bool force_uptodate)
{
        int ret = 0;

        if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
            !PageUptodate(page)) {
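                /*
                 * e.g. a sub-page write into a page that is not yet
                 * uptodate must read the page in first, otherwise the
                 * bytes outside the written range would be lost when
                 * the page is later written back.
                 */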
                ret = btrfs_readpage(NULL, page);
                if (ret)
                        return ret;
                lock_page(page);
                if (!PageUptodate(page)) {
                        unlock_page(page);
                        return -EIO;
                }
        }
        return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
                         struct page **pages, size_t num_pages,
                         loff_t pos, unsigned long first_index,
                         size_t write_bytes, bool force_uptodate)
{
        struct extent_state *cached_state = NULL;
        int i;
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
        struct inode *inode = fdentry(file)->d_inode;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int err = 0;
        int faili = 0;
        u64 start_pos;
        u64 last_pos;

        start_pos = pos & ~((u64)root->sectorsize - 1);
        last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
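        /*
         * Example, assuming 4K pages and a 4K sectorsize: a 100 byte
         * write at pos = 5000 gives index = 1, start_pos = 4096 and,
         * with num_pages = 1, last_pos = (1 + 1) << 12 = 8192, so the
         * extent range locked below is [4096, 8191].
         */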

again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = find_or_create_page(inode->i_mapping, index + i,
                                               mask | __GFP_WRITE);
                if (!pages[i]) {
                        faili = i - 1;
                        err = -ENOMEM;
                        goto fail;
                }

                if (i == 0)
                        err = prepare_uptodate_page(pages[i], pos,
                                                    force_uptodate);
                if (i == num_pages - 1)
                        err = prepare_uptodate_page(pages[i],
                                                    pos + write_bytes, false);
                if (err) {
                        page_cache_release(pages[i]);
                        faili = i - 1;
                        goto fail;
                }
                wait_on_page_writeback(pages[i]);
        }
        err = 0;
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
                lock_extent_bits(&BTRFS_I(inode)->io_tree,
                                 start_pos, last_pos - 1, 0, &cached_state);
                ordered = btrfs_lookup_first_ordered_extent(inode,
                                                            last_pos - 1);
                if (ordered &&
                    ordered->file_offset + ordered->len > start_pos &&
                    ordered->file_offset < last_pos) {
                        btrfs_put_ordered_extent(ordered);
                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                             start_pos, last_pos - 1,
                                             &cached_state, GFP_NOFS);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
                                page_cache_release(pages[i]);
                        }
                        btrfs_wait_ordered_range(inode, start_pos,
                                                 last_pos - start_pos);
                        goto again;
                }
                if (ordered)
                        btrfs_put_ordered_extent(ordered);

                clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
                                  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
                                  EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
                                  GFP_NOFS);
                unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                     start_pos, last_pos - 1, &cached_state,
                                     GFP_NOFS);
        }
        for (i = 0; i < num_pages; i++) {
                if (clear_page_dirty_for_io(pages[i]))
                        account_page_redirty(pages[i]);
                set_page_extent_mapped(pages[i]);
                WARN_ON(!PageLocked(pages[i]));
        }
        return 0;
fail:
        while (faili >= 0) {
                unlock_page(pages[faili]);
                page_cache_release(pages[faili]);
                faili--;
        }
        return err;
}

static noinline ssize_t __btrfs_buffered_write(struct file *file,
                                               struct iov_iter *i,
                                               loff_t pos)
{
        struct inode *inode = fdentry(file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct page **pages = NULL;
        unsigned long first_index;
        size_t num_written = 0;
        int nrptrs;
        int ret = 0;
        bool force_page_uptodate = false;

        nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
                     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
                     (sizeof(struct page *)));
        nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
        nrptrs = max(nrptrs, 8);
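        /*
         * Example, assuming 4K pages on a 64-bit build: the first min()
         * caps nrptrs at PAGE_CACHE_SIZE / sizeof(struct page *) = 512
         * pointers, so the pages array below never takes more than one
         * page itself, while the max() keeps at least 8 slots even when
         * the dirty throttle leaves little headroom.
         */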
        pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        first_index = pos >> PAGE_CACHE_SHIFT;

        while (iov_iter_count(i) > 0) {
                size_t offset = pos & (PAGE_CACHE_SIZE - 1);
                size_t write_bytes = min(iov_iter_count(i),
                                         nrptrs * (size_t)PAGE_CACHE_SIZE -
                                         offset);
                size_t num_pages = (write_bytes + offset +
                                    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
                size_t dirty_pages;
                size_t copied;

                WARN_ON(num_pages > nrptrs);

                /*
                 * Fault pages before locking them in prepare_pages
                 * to avoid recursive lock
                 */
                if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
                        ret = -EFAULT;
                        break;
                }

                ret = btrfs_delalloc_reserve_space(inode,
                                        num_pages << PAGE_CACHE_SHIFT);
                if (ret)
                        break;

                /*
                 * This is going to setup the pages array with the number of
                 * pages we want, so we don't really need to worry about the
                 * contents of pages from loop to loop
                 */
                ret = prepare_pages(root, file, pages, num_pages,
                                    pos, first_index, write_bytes,
                                    force_page_uptodate);
                if (ret) {
                        btrfs_delalloc_release_space(inode,
                                        num_pages << PAGE_CACHE_SHIFT);
                        break;
                }

                copied = btrfs_copy_from_user(pos, num_pages,
                                           write_bytes, pages, i);

                /*
                 * if we have trouble faulting in the pages, fall
                 * back to one page at a time
                 */
                if (copied < write_bytes)
                        nrptrs = 1;

                if (copied == 0) {
                        force_page_uptodate = true;
                        dirty_pages = 0;
                } else {
                        force_page_uptodate = false;
                        dirty_pages = (copied + offset +
                                       PAGE_CACHE_SIZE - 1) >>
                                       PAGE_CACHE_SHIFT;
                }

                /*
                 * If we had a short copy we need to release the excess
                 * delalloc bytes we reserved.  We need to increment
                 * outstanding_extents because btrfs_delalloc_release_space
                 * will decrement it, but we still have an outstanding
                 * extent for the chunk we actually managed to copy.
                 */
                if (num_pages > dirty_pages) {
                        if (copied > 0) {
                                spin_lock(&BTRFS_I(inode)->lock);
                                BTRFS_I(inode)->outstanding_extents++;
                                spin_unlock(&BTRFS_I(inode)->lock);
                        }
                        btrfs_delalloc_release_space(inode,
                                        (num_pages - dirty_pages) <<
                                        PAGE_CACHE_SHIFT);
                }
1330
1331                 if (copied > 0) {
1332                         ret = btrfs_dirty_pages(root, inode, pages,
1333                                                 dirty_pages, pos, copied,
1334                                                 NULL);
1335                         if (ret) {
1336                                 btrfs_delalloc_release_space(inode,
1337                                         dirty_pages << PAGE_CACHE_SHIFT);
1338                                 btrfs_drop_pages(pages, num_pages);
1339                                 break;
1340                         }
1341                 }
1342
1343                 btrfs_drop_pages(pages, num_pages);
1344
1345                 cond_resched();
1346
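             /*
              * Throttle based on how many pages this pass dirtied.  For
              * writes smaller than one btree leaf we also kick btree
              * writeback, apparently so that metadata dirtied by small
              * writes still gets flushed when the data-page throttling
              * above would not trigger on its own.
              */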
1347                 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
1348                                                    dirty_pages);
1349                 if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
1350                         btrfs_btree_balance_dirty(root, 1);
1351
1352                 pos += copied;
1353                 num_written += copied;
1354         }
1355
1356         kfree(pages);
1357
1358         return num_written ? num_written : ret;
1359 }
1360
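     /*
      * O_DIRECT writes go through generic_file_direct_write() first.  If
      * that only completes part of the range, the tail is retried through
      * the buffered path, then written back and dropped from the page
      * cache so the file ends up as if the whole write had been direct.
      */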
1361 static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1362                                     const struct iovec *iov,
1363                                     unsigned long nr_segs, loff_t pos,
1364                                     loff_t *ppos, size_t count, size_t ocount)
1365 {
1366         struct file *file = iocb->ki_filp;
1367         struct iov_iter i;
1368         ssize_t written;
1369         ssize_t written_buffered;
1370         loff_t endbyte;
1371         int err;
1372
1373         written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
1374                                             count, ocount);
1375
1376         if (written < 0 || written == count)
1377                 return written;
1378
1379         pos += written;
1380         count -= written;
1381         iov_iter_init(&i, iov, nr_segs, count, written);
1382         written_buffered = __btrfs_buffered_write(file, &i, pos);
1383         if (written_buffered < 0) {
1384                 err = written_buffered;
1385                 goto out;
1386         }
1387         endbyte = pos + written_buffered - 1;
1388         err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
1389         if (err)
1390                 goto out;
1391         written += written_buffered;
1392         *ppos = pos + written_buffered;
1393         invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
1394                                  endbyte >> PAGE_CACHE_SHIFT);
1395 out:
1396         return written ? written : err;
1397 }
1398
1399 static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1400                                     const struct iovec *iov,
1401                                     unsigned long nr_segs, loff_t pos)
1402 {
1403         struct file *file = iocb->ki_filp;
1404         struct inode *inode = fdentry(file)->d_inode;
1405         struct btrfs_root *root = BTRFS_I(inode)->root;
1406         loff_t *ppos = &iocb->ki_pos;
1407         u64 start_pos;
1408         ssize_t num_written = 0;
1409         ssize_t err = 0;
1410         size_t count, ocount;
1411
1412         sb_start_write(inode->i_sb);
1413
1414         mutex_lock(&inode->i_mutex);
1415
1416         err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
1417         if (err) {
1418                 mutex_unlock(&inode->i_mutex);
1419                 goto out;
1420         }
1421         count = ocount;
1422
1423         current->backing_dev_info = inode->i_mapping->backing_dev_info;
1424         err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1425         if (err) {
1426                 mutex_unlock(&inode->i_mutex);
1427                 goto out;
1428         }
1429
1430         if (count == 0) {
1431                 mutex_unlock(&inode->i_mutex);
1432                 goto out;
1433         }
1434
1435         err = file_remove_suid(file);
1436         if (err) {
1437                 mutex_unlock(&inode->i_mutex);
1438                 goto out;
1439         }
1440
1441         /*
1442          * If BTRFS flips readonly due to some impossible error
1443          * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1444          * even though we have opened the file as writable, we have
1445          * to stop this write operation to ensure FS consistency.
1446          */
1447         if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
1448                 mutex_unlock(&inode->i_mutex);
1449                 err = -EROFS;
1450                 goto out;
1451         }
1452
1453         err = file_update_time(file);
1454         if (err) {
1455                 mutex_unlock(&inode->i_mutex);
1456                 goto out;
1457         }
1458
1459         start_pos = round_down(pos, root->sectorsize);
1460         if (start_pos > i_size_read(inode)) {
1461                 err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
1462                 if (err) {
1463                         mutex_unlock(&inode->i_mutex);
1464                         goto out;
1465                 }
1466         }
1467
1468         if (unlikely(file->f_flags & O_DIRECT)) {
1469                 num_written = __btrfs_direct_write(iocb, iov, nr_segs,
1470                                                    pos, ppos, count, ocount);
1471         } else {
1472                 struct iov_iter i;
1473
1474                 iov_iter_init(&i, iov, nr_segs, count, num_written);
1475
1476                 num_written = __btrfs_buffered_write(file, &i, pos);
1477                 if (num_written > 0)
1478                         *ppos = pos + num_written;
1479         }
1480
1481         mutex_unlock(&inode->i_mutex);
1482
1483         /*
1484          * we want to make sure fsync finds this change
1485          * but we haven't joined a transaction running right now.
1486          *
1487          * Later on, someone is sure to update the inode and get the
1488          * real transid recorded.
1489          *
1490          * We set last_trans now to the fs_info generation + 1,
1491          * this will either be one more than the running transaction
1492          * or the generation used for the next transaction if there isn't
1493          * one running right now.
1494          */
1495         BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
1496         if (num_written > 0 || num_written == -EIOCBQUEUED) {
1497                 err = generic_write_sync(file, pos, num_written);
1498                 if (err < 0 && num_written > 0)
1499                         num_written = err;
1500         }
1501 out:
1502         sb_end_write(inode->i_sb);
1503         current->backing_dev_info = NULL;
1504         return num_written ? num_written : err;
1505 }
1506
1507 int btrfs_release_file(struct inode *inode, struct file *filp)
1508 {
1509         /*
1510          * ordered_data_close is set by setattr when we are about to truncate
1511          * a file from a non-zero size to a zero size.  This tries to
1512          * flush down new bytes that may have been written if the
1513          * application was using truncate to replace a file in place.
1514          */
1515         if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
1516                                &BTRFS_I(inode)->runtime_flags)) {
1517                 btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
1518                 if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
1519                         filemap_flush(inode->i_mapping);
1520         }
1521         if (filp->private_data)
1522                 btrfs_ioctl_trans_end(filp);
1523         return 0;
1524 }
1525
1526 /*
1527  * fsync call for both files and directories.  This logs the inode into
1528  * the tree log instead of forcing full commits whenever possible.
1529  *
1530  * It needs to call filemap_fdatawait so that all ordered extent updates
1531  * in the metadata btree are up to date for copying to the log.
1532  *
1533  * It drops the inode mutex before doing the tree log commit.  This is an
1534  * important optimization for directories because holding the mutex prevents
1535  * new operations on the dir while we write to disk.
1536  */
1537 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1538 {
1539         struct dentry *dentry = file->f_path.dentry;
1540         struct inode *inode = dentry->d_inode;
1541         struct btrfs_root *root = BTRFS_I(inode)->root;
1542         int ret = 0;
1543         struct btrfs_trans_handle *trans;
1544
1545         trace_btrfs_sync_file(file, datasync);
1546
1547         mutex_lock(&inode->i_mutex);
1548
1549         /*
1550          * we wait first, since the writeback may change the inode; also,
1551          * btrfs_wait_ordered_range does a filemap_write_and_wait_range, which
1552          * is why we don't do it above like other file systems do.
1553          */
1554         root->log_batch++;
1555         btrfs_wait_ordered_range(inode, start, end);
1556         root->log_batch++;
1557
1558         /*
1559          * check the transaction that last modified this inode
1560          * and see if it's already been committed
1561          */
1562         if (!BTRFS_I(inode)->last_trans) {
1563                 mutex_unlock(&inode->i_mutex);
1564                 goto out;
1565         }
1566
1567         /*
1568          * if the last transaction that changed this file was before
1569          * the current transaction, we can bail out now without any
1570          * syncing
1571          */
1572         smp_mb();
1573         if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
1574             BTRFS_I(inode)->last_trans <=
1575             root->fs_info->last_trans_committed) {
1576                 BTRFS_I(inode)->last_trans = 0;
1577
1578                 /*
1579                  * We've had everything committed since the last time we were
1580                  * modified so clear this flag in case it was set for whatever
1581                  * reason, it's no longer relevant.
1582                  */
1583                 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1584                           &BTRFS_I(inode)->runtime_flags);
1585                 mutex_unlock(&inode->i_mutex);
1586                 goto out;
1587         }
1588
1589         /*
1590          * ok, we haven't committed the transaction yet; let's do a commit
1591          */
1592         if (file->private_data)
1593                 btrfs_ioctl_trans_end(file);
1594
1595         trans = btrfs_start_transaction(root, 0);
1596         if (IS_ERR(trans)) {
1597                 ret = PTR_ERR(trans);
1598                 mutex_unlock(&inode->i_mutex);
1599                 goto out;
1600         }
1601
1602         ret = btrfs_log_dentry_safe(trans, root, dentry);
1603         if (ret < 0) {
1604                 mutex_unlock(&inode->i_mutex);
1605                 goto out;
1606         }
1607
1608         /* we've logged all the items and now have a consistent
1609          * version of the file in the log.  It is possible that
1610          * someone will come in and modify the file, but that's
1611          * fine because the log is consistent on disk, and we
1612          * have references to all of the file's extents
1613          *
1614          * It is possible that someone will come in and log the
1615          * file again, but that will end up using the synchronization
1616          * inside btrfs_sync_log to keep things safe.
1617          */
1618         mutex_unlock(&inode->i_mutex);
1619
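             /*
              * Summary of the branches below: ret == 0 means the inode was
              * logged and syncing the tree log is enough; ret > 0 means the
              * logging code requires a full transaction commit instead;
              * BTRFS_NO_LOG_SYNC means nothing was logged, so just end the
              * transaction.
              */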
1620         if (ret != BTRFS_NO_LOG_SYNC) {
1621                 if (ret > 0) {
1622                         ret = btrfs_commit_transaction(trans, root);
1623                 } else {
1624                         ret = btrfs_sync_log(trans, root);
1625                         if (ret == 0)
1626                                 ret = btrfs_end_transaction(trans, root);
1627                         else
1628                                 ret = btrfs_commit_transaction(trans, root);
1629                 }
1630         } else {
1631                 ret = btrfs_end_transaction(trans, root);
1632         }
1633 out:
1634         return ret > 0 ? -EIO : ret;
1635 }
1636
1637 static const struct vm_operations_struct btrfs_file_vm_ops = {
1638         .fault          = filemap_fault,
1639         .page_mkwrite   = btrfs_page_mkwrite,
1640 };
1641
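     /*
      * We can't use generic_file_mmap() here: writable faults have to go
      * through btrfs_page_mkwrite() so space can be reserved before a
      * shared mapping dirties a page.
      */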
1642 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
1643 {
1644         struct address_space *mapping = filp->f_mapping;
1645
1646         if (!mapping->a_ops->readpage)
1647                 return -ENOEXEC;
1648
1649         file_accessed(filp);
1650         vma->vm_ops = &btrfs_file_vm_ops;
1651         vma->vm_flags |= VM_CAN_NONLINEAR;
1652
1653         return 0;
1654 }
1655
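     /*
      * Return 1 if the item at @slot is a regular file extent with a zero
      * disk bytenr (i.e. a hole) belonging to this inode that either
      * starts at @end or ends at @start, so that a new hole covering
      * [start, end) can simply be merged into it.
      */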
1656 static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
1657                           int slot, u64 start, u64 end)
1658 {
1659         struct btrfs_file_extent_item *fi;
1660         struct btrfs_key key;
1661
1662         if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1663                 return 0;
1664
1665         btrfs_item_key_to_cpu(leaf, &key, slot);
1666         if (key.objectid != btrfs_ino(inode) ||
1667             key.type != BTRFS_EXTENT_DATA_KEY)
1668                 return 0;
1669
1670         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1671
1672         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
1673                 return 0;
1674
1675         if (btrfs_file_extent_disk_bytenr(leaf, fi))
1676                 return 0;
1677
1678         if (key.offset == end)
1679                 return 1;
1680         if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
1681                 return 1;
1682         return 0;
1683 }
1684
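     /*
      * After the extents in [offset, end) have been dropped, either grow
      * an adjacent hole extent over the gap or insert a fresh one, then
      * swap a matching hole into the extent map cache so lookups don't
      * have to hit the tree.
      */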
1685 static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
1686                       struct btrfs_path *path, u64 offset, u64 end)
1687 {
1688         struct btrfs_root *root = BTRFS_I(inode)->root;
1689         struct extent_buffer *leaf;
1690         struct btrfs_file_extent_item *fi;
1691         struct extent_map *hole_em;
1692         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1693         struct btrfs_key key;
1694         int ret;
1695
1696         key.objectid = btrfs_ino(inode);
1697         key.type = BTRFS_EXTENT_DATA_KEY;
1698         key.offset = offset;
1699
1701         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1702         if (ret < 0)
1703                 return ret;
1704         BUG_ON(!ret);
1705
1706         leaf = path->nodes[0];
1707         if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
1708                 u64 num_bytes;
1709
1710                 path->slots[0]--;
1711                 fi = btrfs_item_ptr(leaf, path->slots[0],
1712                                     struct btrfs_file_extent_item);
1713                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
1714                         end - offset;
1715                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1716                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1717                 btrfs_set_file_extent_offset(leaf, fi, 0);
1718                 btrfs_mark_buffer_dirty(leaf);
1719                 goto out;
1720         }
1721
1722         if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
1723                 u64 num_bytes;
1724
1725                 path->slots[0]++;
1726                 key.offset = offset;
1727                 btrfs_set_item_key_safe(trans, root, path, &key);
1728                 fi = btrfs_item_ptr(leaf, path->slots[0],
1729                                     struct btrfs_file_extent_item);
1730                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
1731                         offset;
1732                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1733                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1734                 btrfs_set_file_extent_offset(leaf, fi, 0);
1735                 btrfs_mark_buffer_dirty(leaf);
1736                 goto out;
1737         }
1738         btrfs_release_path(path);
1739
1740         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
1741                                        0, 0, end - offset, 0, end - offset,
1742                                        0, 0, 0);
1743         if (ret)
1744                 return ret;
1745
1746 out:
1747         btrfs_release_path(path);
1748
1749         hole_em = alloc_extent_map();
1750         if (!hole_em) {
1751                 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
1752                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1753                         &BTRFS_I(inode)->runtime_flags);
1754         } else {
1755                 hole_em->start = offset;
1756                 hole_em->len = end - offset;
1757                 hole_em->orig_start = offset;
1758
1759                 hole_em->block_start = EXTENT_MAP_HOLE;
1760                 hole_em->block_len = 0;
1761                 hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
1762                 hole_em->compress_type = BTRFS_COMPRESS_NONE;
1763                 hole_em->generation = trans->transid;
1764
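                     /*
                      * add_extent_mapping() fails with -EEXIST when something
                      * (a concurrent read, for instance) races a mapping back
                      * into this range; drop the cache again and retry until
                      * our hole mapping inserts cleanly.
                      */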
1765                 do {
1766                         btrfs_drop_extent_cache(inode, offset, end - 1, 0);
1767                         write_lock(&em_tree->lock);
1768                         ret = add_extent_mapping(em_tree, hole_em);
1769                         if (!ret)
1770                                 list_move(&hole_em->list,
1771                                           &em_tree->modified_extents);
1772                         write_unlock(&em_tree->lock);
1773                 } while (ret == -EEXIST);
1774                 free_extent_map(hole_em);
1775                 if (ret)
1776                         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1777                                 &BTRFS_I(inode)->runtime_flags);
1778         }
1779
1780         return 0;
1781 }
1782
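     /*
      * Punch a hole in [offset, offset + len).  Userspace reaches this via
      * fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset,
      * len); the VFS refuses PUNCH_HOLE without KEEP_SIZE.  The unaligned
      * head and tail of the range are zeroed in the page cache, and only
      * fully covered sectors get their extents dropped and replaced with
      * an explicit hole.
      */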
1783 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
1784 {
1785         struct btrfs_root *root = BTRFS_I(inode)->root;
1786         struct extent_state *cached_state = NULL;
1787         struct btrfs_path *path;
1788         struct btrfs_block_rsv *rsv;
1789         struct btrfs_trans_handle *trans;
1790         u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
1791         u64 lockstart = (offset + mask) & ~mask;
1792         u64 lockend = ((offset + len) & ~mask) - 1;
1793         u64 cur_offset = lockstart;
1794         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
1795         u64 drop_end;
1796         unsigned long nr;
1797         int ret = 0;
1798         int err = 0;
1799         bool same_page = (offset >> PAGE_CACHE_SHIFT) ==
1800                 ((offset + len) >> PAGE_CACHE_SHIFT);
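
             /*
              * A worked example of the rounding above, assuming a 4K
              * sectorsize: offset = 1000, len = 8000 gives lockstart = 4096
              * and lockend = 8191, so only fully covered sectors are
              * dropped; the partial head [1000, 4095] and tail [8192, 8999]
              * are zeroed through btrfs_truncate_page() below.
              */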
1801
1802         btrfs_wait_ordered_range(inode, offset, len);
1803
1804         mutex_lock(&inode->i_mutex);
1805         if (offset >= inode->i_size) {
1806                 mutex_unlock(&inode->i_mutex);
1807                 return 0;
1808         }
1809
1810         /*
1811          * Only do this if we are in the same page and we aren't doing the
1812          * entire page.
1813          */
1814         if (same_page && len < PAGE_CACHE_SIZE) {
1815                 ret = btrfs_truncate_page(inode, offset, len, 0);
1816                 mutex_unlock(&inode->i_mutex);
1817                 return ret;
1818         }
1819
1820         /* zero the back part of the first page */
1821         ret = btrfs_truncate_page(inode, offset, 0, 0);
1822         if (ret) {
1823                 mutex_unlock(&inode->i_mutex);
1824                 return ret;
1825         }
1826
1827         /* zero the front part of the last page */
1828         ret = btrfs_truncate_page(inode, offset + len, 0, 1);
1829         if (ret) {
1830                 mutex_unlock(&inode->i_mutex);
1831                 return ret;
1832         }
1833
1834         if (lockend < lockstart) {
1835                 mutex_unlock(&inode->i_mutex);
1836                 return 0;
1837         }
1838
1839         while (1) {
1840                 struct btrfs_ordered_extent *ordered;
1841
1842                 truncate_pagecache_range(inode, lockstart, lockend);
1843
1844                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
1845                                  0, &cached_state);
1846                 ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
1847
1848                 /*
1849                  * We need to make sure we have no ordered extents in this range
1850                  * and that nobody raced in and read a page in this range; if
1851                  * so, we need to try again.
1852                  */
1853                 if ((!ordered ||
1854                     (ordered->file_offset + ordered->len < lockstart ||
1855                      ordered->file_offset > lockend)) &&
1856                      !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
1857                                      lockend, EXTENT_UPTODATE, 0,
1858                                      cached_state)) {
1859                         if (ordered)
1860                                 btrfs_put_ordered_extent(ordered);
1861                         break;
1862                 }
1863                 if (ordered)
1864                         btrfs_put_ordered_extent(ordered);
1865                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
1866                                      lockend, &cached_state, GFP_NOFS);
1867                 btrfs_wait_ordered_range(inode, lockstart,
1868                                          lockend - lockstart + 1);
1869         }
1870
1871         path = btrfs_alloc_path();
1872         if (!path) {
1873                 ret = -ENOMEM;
1874                 goto out;
1875         }
1876
1877         rsv = btrfs_alloc_block_rsv(root);
1878         if (!rsv) {
1879                 ret = -ENOMEM;
1880                 goto out_free;
1881         }
1882         rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
1883         rsv->failfast = 1;
1884
1885         /*
1886          * 1 - update the inode
1887          * 1 - removing the extents in the range
1888          * 1 - adding the hole extent
1889          */
1890         trans = btrfs_start_transaction(root, 3);
1891         if (IS_ERR(trans)) {
1892                 err = PTR_ERR(trans);
1893                 goto out_free;
1894         }
1895
1896         ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
1897                                       min_size);
1898         BUG_ON(ret);
1899         trans->block_rsv = rsv;
1900
1901         while (cur_offset < lockend) {
1902                 ret = __btrfs_drop_extents(trans, root, inode, path,
1903                                            cur_offset, lockend + 1,
1904                                            &drop_end, 1);
1905                 if (ret != -ENOSPC)
1906                         break;
1907
1908                 trans->block_rsv = &root->fs_info->trans_block_rsv;
1909
1910                 ret = fill_holes(trans, inode, path, cur_offset, drop_end);
1911                 if (ret) {
1912                         err = ret;
1913                         break;
1914                 }
1915
1916                 cur_offset = drop_end;
1917
1918                 ret = btrfs_update_inode(trans, root, inode);
1919                 if (ret) {
1920                         err = ret;
1921                         break;
1922                 }
1923
1924                 nr = trans->blocks_used;
1925                 btrfs_end_transaction(trans, root);
1926                 btrfs_btree_balance_dirty(root, nr);
1927
1928                 trans = btrfs_start_transaction(root, 3);
1929                 if (IS_ERR(trans)) {
1930                         ret = PTR_ERR(trans);
1931                         trans = NULL;
1932                         break;
1933                 }
1934
1935                 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
1936                                               rsv, min_size);
1937                 BUG_ON(ret);    /* shouldn't happen */
1938                 trans->block_rsv = rsv;
1939         }
1940
1941         if (ret) {
1942                 err = ret;
1943                 goto out_trans;
1944         }
1945
1946         trans->block_rsv = &root->fs_info->trans_block_rsv;
1947         ret = fill_holes(trans, inode, path, cur_offset, drop_end);
1948         if (ret) {
1949                 err = ret;
1950                 goto out_trans;
1951         }
1952
1953 out_trans:
1954         if (!trans)
1955                 goto out_free;
1956
1957         trans->block_rsv = &root->fs_info->trans_block_rsv;
1958         ret = btrfs_update_inode(trans, root, inode);
1959         nr = trans->blocks_used;
1960         btrfs_end_transaction(trans, root);
1961         btrfs_btree_balance_dirty(root, nr);
1962 out_free:
1963         btrfs_free_path(path);
1964         btrfs_free_block_rsv(root, rsv);
1965 out:
1966         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
1967                              &cached_state, GFP_NOFS);
1968         mutex_unlock(&inode->i_mutex);
1969         if (ret && !err)
1970                 err = ret;
1971         return err;
1972 }
1973
1974 static long btrfs_fallocate(struct file *file, int mode,
1975                             loff_t offset, loff_t len)
1976 {
1977         struct inode *inode = file->f_path.dentry->d_inode;
1978         struct extent_state *cached_state = NULL;
1979         u64 cur_offset;
1980         u64 last_byte;
1981         u64 alloc_start;
1982         u64 alloc_end;
1983         u64 alloc_hint = 0;
1984         u64 locked_end;
1985         u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
1986         struct extent_map *em;
1987         int ret;
1988
1989         alloc_start = offset & ~mask;
1990         alloc_end = (offset + len + mask) & ~mask;
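
             /*
              * e.g. with a 4K sectorsize, offset = 1000 and len = 3000
              * round out to alloc_start = 0 and alloc_end = 4096:
              * preallocation always works on whole sectors.
              */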
1991
1992         /* Make sure we aren't being given some crap mode */
1993         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1994                 return -EOPNOTSUPP;
1995
1996         if (mode & FALLOC_FL_PUNCH_HOLE)
1997                 return btrfs_punch_hole(inode, offset, len);
1998
1999         /*
2000          * Make sure we have enough space before we do the
2001          * allocation.
2002          */
2003         ret = btrfs_check_data_free_space(inode, len);
2004         if (ret)
2005                 return ret;
2006
2007         /*
2008          * wait for ordered IO before we have any locks.  We'll loop again
2009          * below with the locks held.
2010          */
2011         btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
2012
2013         mutex_lock(&inode->i_mutex);
2014         ret = inode_newsize_ok(inode, alloc_end);
2015         if (ret)
2016                 goto out;
2017
2018         if (alloc_start > inode->i_size) {
2019                 ret = btrfs_cont_expand(inode, i_size_read(inode),
2020                                         alloc_start);
2021                 if (ret)
2022                         goto out;
2023         }
2024
2025         locked_end = alloc_end - 1;
2026         while (1) {
2027                 struct btrfs_ordered_extent *ordered;
2028
2029                 /* the extent lock is ordered inside the running
2030                  * transaction
2031                  */
2032                 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
2033                                  locked_end, 0, &cached_state);
2034                 ordered = btrfs_lookup_first_ordered_extent(inode,
2035                                                             alloc_end - 1);
2036                 if (ordered &&
2037                     ordered->file_offset + ordered->len > alloc_start &&
2038                     ordered->file_offset < alloc_end) {
2039                         btrfs_put_ordered_extent(ordered);
2040                         unlock_extent_cached(&BTRFS_I(inode)->io_tree,
2041                                              alloc_start, locked_end,
2042                                              &cached_state, GFP_NOFS);
2043                         /*
2044                          * we can't wait on the range with the transaction
2045                          * running or with the extent lock held
2046                          */
2047                         btrfs_wait_ordered_range(inode, alloc_start,
2048                                                  alloc_end - alloc_start);
2049                 } else {
2050                         if (ordered)
2051                                 btrfs_put_ordered_extent(ordered);
2052                         break;
2053                 }
2054         }
2055
2056         cur_offset = alloc_start;
2057         while (1) {
2058                 u64 actual_end;
2059
2060                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2061                                       alloc_end - cur_offset, 0);
2062                 if (IS_ERR_OR_NULL(em)) {
2063                         if (!em)
2064                                 ret = -ENOMEM;
2065                         else
2066                                 ret = PTR_ERR(em);
2067                         break;
2068                 }
2069                 last_byte = min(extent_map_end(em), alloc_end);
2070                 actual_end = min_t(u64, extent_map_end(em), offset + len);
2071                 last_byte = (last_byte + mask) & ~mask;
2072
2073                 if (em->block_start == EXTENT_MAP_HOLE ||
2074                     (cur_offset >= inode->i_size &&
2075                      !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
2076                         ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
2077                                                         last_byte - cur_offset,
2078                                                         1 << inode->i_blkbits,
2079                                                         offset + len,
2080                                                         &alloc_hint);
2081
2082                         if (ret < 0) {
2083                                 free_extent_map(em);
2084                                 break;
2085                         }
2086                 } else if (actual_end > inode->i_size &&
2087                            !(mode & FALLOC_FL_KEEP_SIZE)) {
2088                         /*
2089                          * We didn't need to allocate any more space, but we
2090                          * still extended the size of the file so we need to
2091                          * update i_size.
2092                          */
2093                         inode->i_ctime = CURRENT_TIME;
2094                         i_size_write(inode, actual_end);
2095                         btrfs_ordered_update_i_size(inode, actual_end, NULL);
2096                 }
2097                 free_extent_map(em);
2098
2099                 cur_offset = last_byte;
2100                 if (cur_offset >= alloc_end) {
2101                         ret = 0;
2102                         break;
2103                 }
2104         }
2105         unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
2106                              &cached_state, GFP_NOFS);
2107 out:
2108         mutex_unlock(&inode->i_mutex);
2109         /* Let go of our reservation. */
2110         btrfs_free_reserved_data_space(inode, len);
2111         return ret;
2112 }
2113
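     /*
      * Scan the extent maps from *offset toward i_size for the first hole
      * (SEEK_HOLE) or data/delalloc (SEEK_DATA) and return its position in
      * *offset.  This backs lseek(fd, pos, SEEK_HOLE) and
      * lseek(fd, pos, SEEK_DATA) on btrfs files.
      */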
2114 static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
2115 {
2116         struct btrfs_root *root = BTRFS_I(inode)->root;
2117         struct extent_map *em;
2118         struct extent_state *cached_state = NULL;
2119         u64 lockstart = *offset;
2120         u64 lockend = i_size_read(inode);
2121         u64 start = *offset;
2122         u64 orig_start = *offset;
2123         u64 len = i_size_read(inode);
2124         u64 last_end = 0;
2125         int ret = 0;
2126
2127         lockend = max_t(u64, root->sectorsize, lockend);
2128         if (lockend <= lockstart)
2129                 lockend = lockstart + root->sectorsize;
2130
2131         len = lockend - lockstart + 1;
2132
2133         len = max_t(u64, len, root->sectorsize);
2134         if (inode->i_size == 0)
2135                 return -ENXIO;
2136
2137         lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
2138                          &cached_state);
2139
2140         /*
2141          * Delalloc is such a pain.  If we have a hole and we have pending
2142          * delalloc for a portion of the hole we will get back a hole that
2143          * exists for the entire range since it hasn't been actually written
2144          * yet.  So to take care of this case we need to look for an extent just
2145          * before the position we want in case there is outstanding delalloc
2146          * going on here.
2147          */
2148         if (origin == SEEK_HOLE && start != 0) {
2149                 if (start <= root->sectorsize)
2150                         em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
2151                                                      root->sectorsize, 0);
2152                 else
2153                         em = btrfs_get_extent_fiemap(inode, NULL, 0,
2154                                                      start - root->sectorsize,
2155                                                      root->sectorsize, 0);
2156                 if (IS_ERR(em)) {
2157                         ret = PTR_ERR(em);
2158                         goto out;
2159                 }
2160                 last_end = em->start + em->len;
2161                 if (em->block_start == EXTENT_MAP_DELALLOC)
2162                         last_end = min_t(u64, last_end, inode->i_size);
2163                 free_extent_map(em);
2164         }
2165
2166         while (1) {
2167                 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
2168                 if (IS_ERR(em)) {
2169                         ret = PTR_ERR(em);
2170                         break;
2171                 }
2172
2173                 if (em->block_start == EXTENT_MAP_HOLE) {
2174                         if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2175                                 if (last_end <= orig_start) {
2176                                         free_extent_map(em);
2177                                         ret = -ENXIO;
2178                                         break;
2179                                 }
2180                         }
2181
2182                         if (origin == SEEK_HOLE) {
2183                                 *offset = start;
2184                                 free_extent_map(em);
2185                                 break;
2186                         }
2187                 } else {
2188                         if (origin == SEEK_DATA) {
2189                                 if (em->block_start == EXTENT_MAP_DELALLOC) {
2190                                         if (start >= inode->i_size) {
2191                                                 free_extent_map(em);
2192                                                 ret = -ENXIO;
2193                                                 break;
2194                                         }
2195                                 }
2196
2197                                 *offset = start;
2198                                 free_extent_map(em);
2199                                 break;
2200                         }
2201                 }
2202
2203                 start = em->start + em->len;
2204                 last_end = em->start + em->len;
2205
2206                 if (em->block_start == EXTENT_MAP_DELALLOC)
2207                         last_end = min_t(u64, last_end, inode->i_size);
2208
2209                 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2210                         free_extent_map(em);
2211                         ret = -ENXIO;
2212                         break;
2213                 }
2214                 free_extent_map(em);
2215                 cond_resched();
2216         }
2217         if (!ret)
2218                 *offset = min(*offset, inode->i_size);
2219 out:
2220         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2221                              &cached_state, GFP_NOFS);
2222         return ret;
2223 }
2224
2225 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
2226 {
2227         struct inode *inode = file->f_mapping->host;
2228         int ret;
2229
2230         mutex_lock(&inode->i_mutex);
2231         switch (origin) {
2232         case SEEK_END:
2233         case SEEK_CUR:
2234                 offset = generic_file_llseek(file, offset, origin);
2235                 goto out;
2236         case SEEK_DATA:
2237         case SEEK_HOLE:
2238                 if (offset >= i_size_read(inode)) {
2239                         mutex_unlock(&inode->i_mutex);
2240                         return -ENXIO;
2241                 }
2242
2243                 ret = find_desired_extent(inode, &offset, origin);
2244                 if (ret) {
2245                         mutex_unlock(&inode->i_mutex);
2246                         return ret;
2247                 }
2248         }
2249
2250         if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
2251                 offset = -EINVAL;
2252                 goto out;
2253         }
2254         if (offset > inode->i_sb->s_maxbytes) {
2255                 offset = -EINVAL;
2256                 goto out;
2257         }
2258
2259         /* Special lock needed here? */
2260         if (offset != file->f_pos) {
2261                 file->f_pos = offset;
2262                 file->f_version = 0;
2263         }
2264 out:
2265         mutex_unlock(&inode->i_mutex);
2266         return offset;
2267 }
2268
2269 const struct file_operations btrfs_file_operations = {
2270         .llseek         = btrfs_file_llseek,
2271         .read           = do_sync_read,
2272         .write          = do_sync_write,
2273         .aio_read       = generic_file_aio_read,
2274         .splice_read    = generic_file_splice_read,
2275         .aio_write      = btrfs_file_aio_write,
2276         .mmap           = btrfs_file_mmap,
2277         .open           = generic_file_open,
2278         .release        = btrfs_release_file,
2279         .fsync          = btrfs_sync_file,
2280         .fallocate      = btrfs_fallocate,
2281         .unlocked_ioctl = btrfs_ioctl,
2282 #ifdef CONFIG_COMPAT
2283         .compat_ioctl   = btrfs_ioctl,
2284 #endif
2285 };