Merge tag 'v3.13-rc6' into for-3.14/core
index 5e2765aadce174e9b7cffd479667a2c48bcf500f..946ecd3b048b0ae1c9bd47ab4c42572f7919b838 100644
@@ -299,7 +299,7 @@ void bch_btree_node_read(struct btree *b)
 
        bio = bch_bbio_alloc(b->c);
        bio->bi_rw      = REQ_META|READ_SYNC;
-       bio->bi_size    = KEY_SIZE(&b->key) << 9;
+       bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
        bio->bi_end_io  = btree_node_read_endio;
        bio->bi_private = &cl;
 
@@ -362,7 +362,7 @@ static void btree_node_write_done(struct closure *cl)
        struct bio_vec *bv;
        int n;
 
-       __bio_for_each_segment(bv, b->bio, n, 0)
+       bio_for_each_segment_all(bv, b->bio, n)
                __free_page(bv->bv_page);
 
        __btree_node_write_done(cl);
@@ -395,7 +395,7 @@ static void do_btree_node_write(struct btree *b)
        b->bio->bi_end_io       = btree_node_write_endio;
        b->bio->bi_private      = cl;
        b->bio->bi_rw           = REQ_META|WRITE_SYNC|REQ_FUA;
-       b->bio->bi_size         = set_blocks(i, b->c) * block_bytes(b->c);
+       b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c);
        bch_bio_map(b->bio, i);
 
        /*
@@ -421,7 +421,7 @@ static void do_btree_node_write(struct btree *b)
                struct bio_vec *bv;
                void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
 
-               bio_for_each_segment(bv, b->bio, j)
+               bio_for_each_segment_all(bv, b->bio, j)
                        memcpy(page_address(bv->bv_page),
                               base + j * PAGE_SIZE, PAGE_SIZE);
 
@@ -1561,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c)
                SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
                            GC_MARK_METADATA);
 
+       /* don't reclaim buckets to which writeback keys point */
+       rcu_read_lock();
+       for (i = 0; i < c->nr_uuids; i++) {
+               struct bcache_device *d = c->devices[i];
+               struct cached_dev *dc;
+               struct keybuf_key *w, *n;
+               unsigned j;
+
+               if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
+                       continue;
+               dc = container_of(d, struct cached_dev, disk);
+
+               spin_lock(&dc->writeback_keys.lock);
+               rbtree_postorder_for_each_entry_safe(w, n,
+                                       &dc->writeback_keys.keys, node)
+                       for (j = 0; j < KEY_PTRS(&w->key); j++)
+                               SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
+                                           GC_MARK_DIRTY);
+               spin_unlock(&dc->writeback_keys.lock);
+       }
+       rcu_read_unlock();
+
        for_each_cache(ca, c, i) {
                uint64_t *i;
 
@@ -1817,7 +1839,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
                        if (KEY_START(k) > KEY_START(insert) + sectors_found)
                                goto check_failed;
 
-                       if (KEY_PTRS(replace_key) != KEY_PTRS(k))
+                       if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
+                           KEY_DIRTY(k) != KEY_DIRTY(replace_key))
                                goto check_failed;
 
                        /* skip past gen */
@@ -2217,7 +2240,7 @@ struct btree_insert_op {
        struct bkey     *replace_key;
 };
 
-int btree_insert_fn(struct btree_op *b_op, struct btree *b)
+static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
 {
        struct btree_insert_op *op = container_of(b_op,
                                        struct btree_insert_op, op);