xtensa: fixup simdisk driver to work with immutable bio_vecs

diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index af79bae5ab748e12fdf5468bfa4ae0377000ac25..357eb272dbd9d3337bf5d1a13db443261eb2b24b 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -413,7 +413,7 @@ static bool block_size_is_power_of_two(struct pool *pool)
 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 {
        struct pool *pool = tc->pool;
-       sector_t block_nr = bio->bi_sector;
+       sector_t block_nr = bio->bi_iter.bi_sector;
 
        if (block_size_is_power_of_two(pool))
                block_nr >>= pool->sectors_per_block_shift;
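
These dm-thin hunks are part of the block layer's immutable biovec conversion: the fields tracking a bio's current position moved out of struct bio into the embedded iterator bio->bi_iter, so every bi_sector/bi_size access gains a ".bi_iter" step. For reference, the iterator introduced by that series looks roughly like this (3.14-era include/linux/blk_types.h):

        struct bvec_iter {
                sector_t        bi_sector;      /* device address, 512-byte sectors */
                unsigned int    bi_size;        /* residual I/O count, in bytes */
                unsigned int    bi_idx;         /* current index into bi_io_vec */
                unsigned int    bi_bvec_done;   /* bytes done in current bvec */
        };

Keeping the cursor in a small value type is what lets the biovec array itself stay immutable while the bio is split or advanced.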
@@ -426,14 +426,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
 {
        struct pool *pool = tc->pool;
-       sector_t bi_sector = bio->bi_sector;
+       sector_t bi_sector = bio->bi_iter.bi_sector;
 
        bio->bi_bdev = tc->pool_dev->bdev;
        if (block_size_is_power_of_two(pool))
-               bio->bi_sector = (block << pool->sectors_per_block_shift) |
-                               (bi_sector & (pool->sectors_per_block - 1));
+               bio->bi_iter.bi_sector =
+                       (block << pool->sectors_per_block_shift) |
+                       (bi_sector & (pool->sectors_per_block - 1));
        else
-               bio->bi_sector = (block * pool->sectors_per_block) +
+               bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
                                 sector_div(bi_sector, pool->sectors_per_block);
 }
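
The remap arithmetic is unchanged; only the field access moves. Both branches compute a pool-block base plus the intra-block offset, and sector_div() divides its first argument in place and returns the remainder. A worked example with hypothetical numbers (128-sector thin blocks, a bio at sector 1000 remapped to pool block 5):

        /* power-of-two path: shift is 7, mask is 127 */
        (5 << 7) | (1000 & 127)         /* == 640 | 104 == 744 */

        /* generic path: sector_div(1000, 128) returns the remainder 104 */
        (5 * 128) + 104                 /* == 744, same result */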
 
@@ -610,8 +611,10 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
 
 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 {
-       if (m->bio)
+       if (m->bio) {
                m->bio->bi_end_io = m->saved_bi_end_io;
+               atomic_inc(&m->bio->bi_remaining);
+       }
        cell_error(m->tc->pool, m->cell);
        list_del(&m->list);
        mempool_free(m, m->tc->pool->mapping_pool);
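
The atomic_inc() added here (and in the next hunk) compensates for the new bio_endio() semantics that came with bio chaining: bio_endio() now does atomic_dec_and_test() on bio->bi_remaining and only invokes ->bi_end_io once the count reaches zero. dm-thin intercepts completion of overwrite bios by swapping in its own endio, and that first completion already consumed the count, so restoring the saved endio has to give the reference back before bio_endio() can run again. A condensed sketch of the pattern:

        /* hook: let the pool observe the completion first */
        m->saved_bi_end_io = bio->bi_end_io;
        bio->bi_end_io = overwrite_endio;       /* runs when bi_remaining hits 0 */

        /* later: restore, then pay one bi_remaining per extra bio_endio() */
        bio->bi_end_io = m->saved_bi_end_io;
        atomic_inc(&bio->bi_remaining);
        bio_endio(bio, 0);                      /* 3.x two-argument signature */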
@@ -625,8 +628,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
        int r;
 
        bio = m->bio;
-       if (bio)
+       if (bio) {
                bio->bi_end_io = m->saved_bi_end_io;
+               atomic_inc(&bio->bi_remaining);
+       }
 
        if (m->err) {
                cell_error(pool, m->cell);
@@ -723,7 +728,8 @@ static void process_prepared(struct pool *pool, struct list_head *head,
  */
 static int io_overlaps_block(struct pool *pool, struct bio *bio)
 {
-       return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
+       return bio->bi_iter.bi_size ==
+               (pool->sectors_per_block << SECTOR_SHIFT);
 }
 
 static int io_overwrites_block(struct pool *pool, struct bio *bio)
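
A units note on this check: bi_iter.bi_size is in bytes while sectors_per_block is in 512-byte sectors, hence the shift by SECTOR_SHIFT (9):

        /* e.g. pool->sectors_per_block == 128:
         *   128 << 9 == 65536 bytes == 64 KiB,
         * so only a bio spanning the whole 64 KiB block "overlaps" it. */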
@@ -1133,7 +1139,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
        if (bio_detain(pool, &key, bio, &cell))
                return;
 
-       if (bio_data_dir(bio) == WRITE && bio->bi_size)
+       if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
                break_sharing(tc, bio, block, &key, lookup_result, cell);
        else {
                struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -1156,7 +1162,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
        /*
         * Remap empty bios (flushes) immediately, without provisioning.
         */
-       if (!bio->bi_size) {
+       if (!bio->bi_iter.bi_size) {
                inc_all_io_entry(pool, bio);
                cell_defer_no_holder(tc, cell);
 
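
A bio with bi_iter.bi_size == 0 carries no payload; here it is an empty flush whose only job is to order previously completed writes, so there is nothing to provision and it is remapped straight to the pool device. A sketch of how such bios are recognized in 3.x-era bio-based drivers (REQ_FLUSH in bi_rw; later kernels use REQ_PREFLUSH/REQ_OP_FLUSH instead):

        if ((bio->bi_rw & REQ_FLUSH) && !bio->bi_iter.bi_size) {
                /* no data to map: forward for ordering only */
        }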
@@ -1256,7 +1262,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
        switch (r) {
        case 0:
-               if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
+               if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
                        bio_io_error(bio);
                else {
                        inc_all_io_entry(tc->pool, bio);
@@ -1400,6 +1406,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
        case PM_FAIL:
                DMERR("%s: switching pool to failure mode",
                      dm_device_name(pool->pool_md));
+               dm_pool_metadata_read_only(pool->pmd);
                pool->process_bio = process_bio_fail;
                pool->process_discard = process_bio_fail;
                pool->process_prepared_mapping = process_prepared_mapping_fail;
@@ -1424,6 +1431,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
                break;
 
        case PM_WRITE:
+               dm_pool_metadata_read_write(pool->pmd);
                pool->process_bio = process_bio;
                pool->process_discard = process_discard;
                pool->process_prepared_mapping = process_prepared_mapping;
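
Together the two added calls make metadata writability track the pool mode symmetrically: degraded modes pin the metadata device read-only, and only PM_WRITE reopens it for update. A sketch of the resulting mapping (the PM_READ_ONLY arm sits between these two hunks and is not shown):

        switch (new_mode) {
        case PM_FAIL:           /* metadata damaged; needs thin_repair */
        case PM_READ_ONLY:      /* e.g. out of metadata or data space */
                dm_pool_metadata_read_only(pool->pmd);
                break;
        case PM_WRITE:          /* normal operation */
                dm_pool_metadata_read_write(pool->pmd);
                break;
        }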
@@ -1640,12 +1648,19 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
        struct pool_c *pt = ti->private;
 
        /*
-        * We want to make sure that degraded pools are never upgraded.
+        * We want to make sure that a pool in PM_FAIL mode is never upgraded.
         */
        enum pool_mode old_mode = pool->pf.mode;
        enum pool_mode new_mode = pt->adjusted_pf.mode;
 
-       if (old_mode > new_mode)
+       /*
+        * If we were in PM_FAIL mode, rollback of metadata failed.  We're
+        * not going to recover without a thin_repair.  So we never let the
+        * pool move out of the old mode.  On the other hand a PM_READ_ONLY
+        * may have been due to a lack of metadata or data space, and may
+        * now work (ie. if the underlying devices have been resized).
+        */
+       if (old_mode == PM_FAIL)
                new_mode = old_mode;
 
        pool->ti = ti;
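
The old comparison leaned on the declaration order of enum pool_mode (declared earlier in dm-thin.c), least to most degraded:

        enum pool_mode {
                PM_WRITE,       /* metadata may be changed */
                PM_READ_ONLY,   /* metadata may not be changed */
                PM_FAIL,        /* all I/O fails */
        };

With "old_mode > new_mode" every degraded mode was sticky, so a pool that went read-only on a transient condition such as running out of space could never be returned to PM_WRITE by reloading the table. The new check pins only PM_FAIL.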
@@ -2870,7 +2885,7 @@ out_unlock:
 
 static int thin_map(struct dm_target *ti, struct bio *bio)
 {
-       bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
+       bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
 
        return thin_bio_map(ti, bio);
 }
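
dm_target_offset() itself is unchanged; it rebases a device-relative sector to the start of this target, per include/linux/device-mapper.h:

        #define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

As throughout this diff, only the location of the sector field moves (bio->bi_sector becomes bio->bi_iter.bi_sector); the remapping logic is identical.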