diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 88f2f802d528be23b8e64c26085913677082be03..2c0cf511ec2385fa5a558b5d2e1e1ed0c874c9f6 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -887,7 +887,8 @@ static int commit(struct pool *pool)
 
        r = dm_pool_commit_metadata(pool->pmd);
        if (r)
-               DMERR_LIMIT("commit failed: error = %d", r);
+               DMERR_LIMIT("%s: commit failed: error = %d",
+                           dm_device_name(pool->pool_md), r);
 
        return r;
 }
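
This hunk sets the pattern repeated through the rest of the patch: every pool-level error message is prefixed with dm_device_name(pool->pool_md) so logs from several pools can be told apart. A minimal userspace sketch of the idea (POOL_ERR and the device name "253:4" are made up for illustration; the real DMERR_LIMIT macro additionally rate-limits output):

#include <stdio.h>

/*
 * Hypothetical stand-in for the DMERR_LIMIT()/dm_device_name() pairing
 * used throughout this patch: each message carries the pool device's
 * name.  Unlike the kernel macro, this sketch does no rate limiting.
 */
#define POOL_ERR(name, fmt, ...) \
        fprintf(stderr, "device-mapper: thin: %s: " fmt "\n", (name), ##__VA_ARGS__)

int main(void)
{
        int r = -5;     /* pretend dm_pool_commit_metadata() returned -EIO */

        if (r)
                POOL_ERR("253:4", "commit failed: error = %d", r);
        return 0;
}
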
@@ -917,6 +918,13 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
        unsigned long flags;
        struct pool *pool = tc->pool;
 
+       /*
+        * Once no_free_space is set we must not allow allocation to succeed.
+        * Otherwise it is difficult to explain, debug, test and support.
+        */
+       if (pool->no_free_space)
+               return -ENOSPC;
+
        r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
        if (r)
                return r;
@@ -931,31 +939,30 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
        }
 
        if (!free_blocks) {
-               if (pool->no_free_space)
-                       return -ENOSPC;
-               else {
-                       /*
-                        * Try to commit to see if that will free up some
-                        * more space.
-                        */
-                       (void) commit_or_fallback(pool);
+               /*
+                * Try to commit to see if that will free up some
+                * more space.
+                */
+               (void) commit_or_fallback(pool);
 
-                       r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
-                       if (r)
-                               return r;
+               r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
+               if (r)
+                       return r;
 
-                       /*
-                        * If we still have no space we set a flag to avoid
-                        * doing all this checking and return -ENOSPC.
-                        */
-                       if (!free_blocks) {
-                               DMWARN("%s: no free space available.",
-                                      dm_device_name(pool->pool_md));
-                               spin_lock_irqsave(&pool->lock, flags);
-                               pool->no_free_space = 1;
-                               spin_unlock_irqrestore(&pool->lock, flags);
-                               return -ENOSPC;
-                       }
+               /*
+                * If we still have no space we set a flag to avoid
+                * doing all this checking and return -ENOSPC.  This
+                * flag serves as a latch that disallows allocations from
+                * this pool until the admin takes action (e.g. resize or
+                * table reload).
+                */
+               if (!free_blocks) {
+                       DMWARN("%s: no free space available.",
+                              dm_device_name(pool->pool_md));
+                       spin_lock_irqsave(&pool->lock, flags);
+                       pool->no_free_space = 1;
+                       spin_unlock_irqrestore(&pool->lock, flags);
+                       return -ENOSPC;
                }
        }
 
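The point of the rewrite above is the latch semantics: once the pool is found full even after a commit, no_free_space is set under the pool spinlock, and every later allocation fails immediately with -ENOSPC instead of repeatedly re-checking and re-committing. A compact userspace sketch of that behaviour, with a pthread mutex standing in for the spinlock (all names hypothetical):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

/*
 * Toy model of the no_free_space latch.  As in the kernel code above,
 * the flag is read without the lock (a stale read merely delays the
 * fast-path failure by one call) but is only ever set under it.
 */
struct toy_pool {
        pthread_mutex_t lock;
        bool no_free_space;
        unsigned long free_blocks;
};

static int toy_alloc_block(struct toy_pool *pool, unsigned long *result)
{
        if (pool->no_free_space)
                return -ENOSPC;         /* fail fast: the latch is set */

        if (!pool->free_blocks) {
                /* a real pool would first try a commit to reclaim space */
                pthread_mutex_lock(&pool->lock);
                pool->no_free_space = true;
                pthread_mutex_unlock(&pool->lock);
                return -ENOSPC;
        }

        *result = --pool->free_blocks;
        return 0;
}

/*
 * Only an explicit admin action (resize or table reload) reopens the
 * pool for allocation.
 */
static void toy_pool_grow(struct toy_pool *pool, unsigned long new_blocks)
{
        pthread_mutex_lock(&pool->lock);
        pool->free_blocks += new_blocks;
        pool->no_free_space = false;
        pthread_mutex_unlock(&pool->lock);
}
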
@@ -1085,6 +1092,7 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
 {
        int r;
        dm_block_t data_block;
+       struct pool *pool = tc->pool;
 
        r = alloc_data_block(tc, &data_block);
        switch (r) {
@@ -1094,13 +1102,14 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
                break;
 
        case -ENOSPC:
-               no_space(tc->pool, cell);
+               no_space(pool, cell);
                break;
 
        default:
                DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
                            __func__, r);
-               cell_error(tc->pool, cell);
+               set_pool_mode(pool, PM_READ_ONLY);
+               cell_error(pool, cell);
                break;
        }
 }
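
The functional change here is the added set_pool_mode(pool, PM_READ_ONLY) on the default branch: a plain -ENOSPC keeps its gentler no_space() handling, but any unexpected allocation error now degrades the whole pool rather than just erroring one cell. A condensed sketch of that dispatch (hypothetical helper, not the kernel function):

#include <errno.h>
#include <stdio.h>

enum pool_mode { PM_WRITE, PM_READ_ONLY, PM_FAIL };

/*
 * Illustrative condensation of the new dispatch in break_sharing():
 * -ENOSPC is an expected, per-bio condition, but any other allocation
 * failure means the metadata can no longer be trusted, so the pool is
 * demoted to read-only before the blocked bios are errored.
 */
static void handle_alloc_result(int r, enum pool_mode *mode)
{
        switch (r) {
        case 0:
                /* got a block: remap the bio and carry on */
                break;

        case -ENOSPC:
                /* expected and recoverable: park the bios, wait for admin */
                break;

        default:
                fprintf(stderr, "alloc_data_block() failed: error = %d\n", r);
                *mode = PM_READ_ONLY;   /* latch the degraded mode */
                break;
        }
}
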
@@ -1386,7 +1395,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
 
        switch (mode) {
        case PM_FAIL:
-               DMERR("switching pool to failure mode");
+               DMERR("%s: switching pool to failure mode",
+                     dm_device_name(pool->pool_md));
                pool->process_bio = process_bio_fail;
                pool->process_discard = process_bio_fail;
                pool->process_prepared_mapping = process_prepared_mapping_fail;
@@ -1394,10 +1404,12 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
                break;
 
        case PM_READ_ONLY:
-               DMERR("switching pool to read-only mode");
+               DMERR("%s: switching pool to read-only mode",
+                     dm_device_name(pool->pool_md));
                r = dm_pool_abort_metadata(pool->pmd);
                if (r) {
-                       DMERR("aborting transaction failed");
+                       DMERR("%s: aborting transaction failed",
+                             dm_device_name(pool->pool_md));
                        set_pool_mode(pool, PM_FAIL);
                } else {
                        dm_pool_metadata_read_only(pool->pmd);
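
For context on the failure path above: set_pool_mode() works by swapping the pool's process_* function pointers, so changing mode is a one-time operation rather than a branch in every I/O path, but entering read-only mode must first abort the in-flight metadata transaction, and when that abort itself fails the only safe escalation is failure mode. A toy sketch of the function-pointer dispatch (all names hypothetical):

#include <stdio.h>

struct toy_pool;
typedef void (*process_fn)(struct toy_pool *);

struct toy_pool {
        const char *name;
        process_fn process_bio;
};

static void process_rw(struct toy_pool *p)   { printf("%s: rw bio\n", p->name); }
static void process_ro(struct toy_pool *p)   { printf("%s: ro bio\n", p->name); }
static void process_fail(struct toy_pool *p) { printf("%s: -EIO\n", p->name); }

enum toy_mode { TOY_WRITE, TOY_READ_ONLY, TOY_FAIL };

static void toy_set_pool_mode(struct toy_pool *p, enum toy_mode mode)
{
        switch (mode) {
        case TOY_FAIL:
                p->process_bio = process_fail;
                break;
        case TOY_READ_ONLY:
                /*
                 * The kernel first aborts the open metadata transaction;
                 * if even that fails it recurses with PM_FAIL, as the
                 * hunk above shows.
                 */
                p->process_bio = process_ro;
                break;
        case TOY_WRITE:
                p->process_bio = process_rw;
                break;
        }
}

int main(void)
{
        struct toy_pool p = { "253:4", process_rw };

        toy_set_pool_mode(&p, TOY_READ_ONLY);
        p.process_bio(&p);      /* prints "253:4: ro bio" */
        return 0;
}
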
@@ -2083,6 +2095,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
         * them down to the data device.  The thin device's discard
         * processing will cause mappings to be removed from the btree.
         */
+       ti->discard_zeroes_data_unsupported = true;
        if (pf.discard_enabled && pf.discard_passdown) {
                ti->num_discard_bios = 1;
 
@@ -2092,7 +2105,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
                 * thin devices' discard limits consistent).
                 */
                ti->discards_supported = true;
-               ti->discard_zeroes_data_unsupported = true;
        }
        ti->private = pt;
 
@@ -2156,19 +2168,22 @@ static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
 
        r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
        if (r) {
-               DMERR("failed to retrieve data device size");
+               DMERR("%s: failed to retrieve data device size",
+                     dm_device_name(pool->pool_md));
                return r;
        }
 
        if (data_size < sb_data_size) {
-               DMERR("pool target (%llu blocks) too small: expected %llu",
+               DMERR("%s: pool target (%llu blocks) too small: expected %llu",
+                     dm_device_name(pool->pool_md),
                      (unsigned long long)data_size, sb_data_size);
                return -EINVAL;
 
        } else if (data_size > sb_data_size) {
                r = dm_pool_resize_data_dev(pool->pmd, data_size);
                if (r) {
-                       DMERR("failed to resize data device");
+                       DMERR("%s: failed to resize data device",
+                             dm_device_name(pool->pool_md));
                        set_pool_mode(pool, PM_READ_ONLY);
                        return r;
                }
@@ -2192,19 +2207,22 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
 
        r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
        if (r) {
-               DMERR("failed to retrieve data device size");
+               DMERR("%s: failed to retrieve metadata device size",
+                     dm_device_name(pool->pool_md));
                return r;
        }
 
        if (metadata_dev_size < sb_metadata_dev_size) {
-               DMERR("metadata device (%llu blocks) too small: expected %llu",
+               DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
+                     dm_device_name(pool->pool_md),
                      metadata_dev_size, sb_metadata_dev_size);
                return -EINVAL;
 
        } else if (metadata_dev_size > sb_metadata_dev_size) {
                r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
                if (r) {
-                       DMERR("failed to resize metadata device");
+                       DMERR("%s: failed to resize metadata device",
+                             dm_device_name(pool->pool_md));
                        return r;
                }
 
@@ -2530,37 +2548,43 @@ static void pool_status(struct dm_target *ti, status_type_t type,
 
                r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
                if (r) {
-                       DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
+                       DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
+                             dm_device_name(pool->pool_md), r);
                        goto err;
                }
 
                r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
                if (r) {
-                       DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
+                       DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
+                             dm_device_name(pool->pool_md), r);
                        goto err;
                }
 
                r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
                if (r) {
-                       DMERR("dm_pool_get_metadata_dev_size returned %d", r);
+                       DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
+                             dm_device_name(pool->pool_md), r);
                        goto err;
                }
 
                r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
                if (r) {
-                       DMERR("dm_pool_get_free_block_count returned %d", r);
+                       DMERR("%s: dm_pool_get_free_block_count returned %d",
+                             dm_device_name(pool->pool_md), r);
                        goto err;
                }
 
                r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
                if (r) {
-                       DMERR("dm_pool_get_data_dev_size returned %d", r);
+                       DMERR("%s: dm_pool_get_data_dev_size returned %d",
+                             dm_device_name(pool->pool_md), r);
                        goto err;
                }
 
                r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
                if (r) {
-                       DMERR("dm_pool_get_metadata_snap returned %d", r);
+                       DMERR("%s: dm_pool_get_metadata_snap returned %d",
+                             dm_device_name(pool->pool_md), r);
                        goto err;
                }
 
@@ -2648,17 +2672,33 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
        struct pool_c *pt = ti->private;
        struct pool *pool = pt->pool;
+       uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
 
-       blk_limits_io_min(limits, 0);
-       blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+       /*
+        * If the system-determined stacked limits are compatible with the
+        * pool's blocksize (io_opt is a factor) do not override them.
+        */
+       if (io_opt_sectors < pool->sectors_per_block ||
+           do_div(io_opt_sectors, pool->sectors_per_block)) {
+               blk_limits_io_min(limits, 0);
+               blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+       }
 
        /*
         * pt->adjusted_pf is a staging area for the actual features to use.
         * They get transferred to the live pool in bind_control_target()
         * called from pool_preresume().
         */
-       if (!pt->adjusted_pf.discard_enabled)
+       if (!pt->adjusted_pf.discard_enabled) {
+               /*
+                * Must explicitly disallow stacking discard limits otherwise the
+                * block layer will stack them if pool's data device has support.
+                * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
+                * user to see that, so make sure to set all discard limits to 0.
+                */
+               limits->discard_granularity = 0;
                return;
+       }
 
        disable_passdown_if_not_supported(pt);
 
@@ -2669,7 +2709,7 @@ static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
-       .version = {1, 8, 0},
+       .version = {1, 9, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
@@ -2794,10 +2834,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
        ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
 
        /* In case the pool supports discards, pass them on. */
+       ti->discard_zeroes_data_unsupported = true;
        if (tc->pool->pf.discard_enabled) {
                ti->discards_supported = true;
                ti->num_discard_bios = 1;
-               ti->discard_zeroes_data_unsupported = true;
                /* Discard bios must be split on a block boundary */
                ti->split_discard_bios = true;
        }
@@ -2956,7 +2996,7 @@ static int thin_iterate_devices(struct dm_target *ti,
 
 static struct target_type thin_target = {
        .name = "thin",
-       .version = {1, 8, 0},
+       .version = {1, 9, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,