diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 35bf83876f3fe51eae6f4769a89246981f10d1c9..0165b8672f099c49f96400fc0c87cc0b7cfc4532 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -692,7 +692,10 @@ next:
 int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root, struct inode *inode,
                         struct btrfs_path *path, u64 start, u64 end,
-                        u64 *drop_end, int drop_cache)
+                        u64 *drop_end, int drop_cache,
+                        int replace_extent,
+                        u32 extent_item_size,
+                        int *key_inserted)
 {
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
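
The roles of the three new __btrfs_drop_extents() parameters, summarized as a
comment (a summary inferred from the hunks below, not part of the patch
itself):

	/*
	 * @replace_extent:   the caller intends to insert a replacement file
	 *                    extent item for the dropped range right after
	 *                    this call returns
	 * @extent_item_size: size of that replacement item's data
	 * @key_inserted:     set to 1 if an empty item of @extent_item_size
	 *                    was already reserved in the current leaf, in
	 *                    which case the path is returned still pointing
	 *                    at the reserved slot
	 */
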
@@ -712,6 +715,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
        int modify_tree = -1;
        int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
        int found = 0;
+       int leafs_visited = 0;
 
        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);
@@ -733,6 +737,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
                                path->slots[0]--;
                }
                ret = 0;
+               leafs_visited++;
 next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
@@ -744,6 +749,7 @@ next_slot:
                                ret = 0;
                                break;
                        }
+                       leafs_visited++;
                        leaf = path->nodes[0];
                        recow = 1;
                }
@@ -766,7 +772,8 @@ next_slot:
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
-                               btrfs_file_extent_inline_len(leaf, fi);
+                               btrfs_file_extent_inline_len(leaf,
+                                                    path->slots[0], fi);
                } else {
                        WARN_ON(1);
                        extent_end = search_start;
@@ -927,14 +934,44 @@ next_slot:
        }
 
        if (!ret && del_nr > 0) {
+               /*
+                * Set path->slots[0] to first slot, so that after the delete
+                * if items are moved off from our leaf to its immediate left or
+                * right neighbor leaves, we end up with a correct and adjusted
+                * path->slots[0] for our insertion.
+                */
+               path->slots[0] = del_slot;
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret)
                        btrfs_abort_transaction(trans, root, ret);
+
+               leaf = path->nodes[0];
+               /*
+                * The leaf eb has the EXTENT_BUFFER_STALE flag set if it was
+                * deleted (that is, its contents got pushed to its neighbors),
+                * in which case path->locks[0] == 0.
+                */
+               if (!ret && replace_extent && leafs_visited == 1 &&
+                   path->locks[0] &&
+                   btrfs_leaf_free_space(root, leaf) >=
+                   sizeof(struct btrfs_item) + extent_item_size) {
+
+                       key.objectid = ino;
+                       key.type = BTRFS_EXTENT_DATA_KEY;
+                       key.offset = start;
+                       setup_items_for_insert(root, path, &key,
+                                              &extent_item_size,
+                                              extent_item_size,
+                                              sizeof(struct btrfs_item) +
+                                              extent_item_size, 1);
+                       *key_inserted = 1;
+               }
        }
 
+       if (!replace_extent || !(*key_inserted))
+               btrfs_release_path(path);
        if (drop_end)
                *drop_end = found ? min(end, extent_end) : end;
-       btrfs_release_path(path);
        return ret;
 }
 
@@ -949,7 +986,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
        if (!path)
                return -ENOMEM;
        ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
-                                  drop_cache);
+                                  drop_cache, 0, 0, NULL);
        btrfs_free_path(path);
        return ret;
 }
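
btrfs_drop_extents() above keeps the old behaviour by passing 0, 0, NULL for
the new parameters. A caller that wants the fast replace path would instead
ask for a reserved slot and only fall back to a regular insert when none was
reserved, roughly along these lines (a hypothetical sketch; file_pos,
num_bytes and the out label are assumed, and the real callers may differ):

	struct btrfs_key ins;
	int extent_inserted = 0;
	int ret;

	ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
				   file_pos + num_bytes, NULL,
				   0 /* drop_cache */, 1 /* replace_extent */,
				   sizeof(struct btrfs_file_extent_item),
				   &extent_inserted);
	if (ret)
		goto out;

	if (!extent_inserted) {
		/* no slot was reserved in the leaf, do a normal insertion */
		ins.objectid = btrfs_ino(inode);
		ins.type = BTRFS_EXTENT_DATA_KEY;
		ins.offset = file_pos;

		ret = btrfs_insert_empty_item(trans, root, path, &ins,
					      sizeof(struct btrfs_file_extent_item));
		if (ret)
			goto out;
	}
	/* either way, path->slots[0] now points at an empty item to fill in */
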
@@ -1555,9 +1592,10 @@ again:
                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                             lockstart, lockend, &cached_state,
                                             GFP_NOFS);
-               btrfs_drop_pages(pages, num_pages);
-               if (ret)
+               if (ret) {
+                       btrfs_drop_pages(pages, num_pages);
                        break;
+               }
 
                release_bytes = 0;
                if (only_release_metadata && copied > 0) {
@@ -1571,6 +1609,8 @@ again:
                        only_release_metadata = false;
                }
 
+               btrfs_drop_pages(pages, num_pages);
+
                cond_resched();
 
                balance_dirty_pages_ratelimited(inode->i_mapping);
@@ -1892,12 +1932,24 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        if (file->private_data)
                btrfs_ioctl_trans_end(file);
 
+       /*
+        * We use start here because we will need to wait on the IO to complete
+        * in btrfs_sync_log, which could require joining a transaction (for
+        * example checking cross references in the nocow path).  If we use join
+        * here we could get into a situation where we're waiting on IO to
+        * happen that is blocked on a transaction trying to commit.  With start
+        * we inc the extwriter counter, so we wait for all extwriters to exit
+        * before we start blocking join'ers.  This comment is to keep somebody
+        * from thinking they are super smart and changing this to
+        * btrfs_join_transaction *cough*Josef*cough*.
+        */
        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                mutex_unlock(&inode->i_mutex);
                goto out;
        }
+       trans->sync = true;
 
        ret = btrfs_log_dentry_safe(trans, root, dentry);
        if (ret < 0) {
@@ -2219,7 +2271,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
        while (cur_offset < lockend) {
                ret = __btrfs_drop_extents(trans, root, inode, path,
                                           cur_offset, lockend + 1,
-                                          &drop_end, 1);
+                                          &drop_end, 1, 0, 0, NULL);
                if (ret != -ENOSPC)
                        break;