diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 213ae32a0fc4f7eadacb923a5f1f2ac69163f5c7..37fdaf81bd1f89abfd28f6a46d2be7ce95f8bcba 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -111,7 +111,7 @@ struct cell_key {
        dm_block_t block;
 };
 
-struct cell {
+struct dm_bio_prison_cell {
        struct hlist_node list;
        struct bio_prison *prison;
        struct cell_key key;
@@ -141,6 +141,8 @@ static uint32_t calc_nr_buckets(unsigned nr_cells)
        return n;
 }
 
+static struct kmem_cache *_cell_cache;
+
 /*
  * @nr_cells should be the number of cells you want in use _concurrently_.
  * Don't confuse it with the number of distinct keys.
@@ -157,8 +159,7 @@ static struct bio_prison *prison_create(unsigned nr_cells)
                return NULL;
 
        spin_lock_init(&prison->lock);
-       prison->cell_pool = mempool_create_kmalloc_pool(nr_cells,
-                                                       sizeof(struct cell));
+       prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
        if (!prison->cell_pool) {
                kfree(prison);
                return NULL;
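
The three kmalloc-backed mempools in this target are being converted to slab-backed pools, so cells, mappings and endio hooks come from dedicated kmem caches (created in dm_thin_init() at the bottom of the patch). Slab caches give per-type accounting in /proc/slabinfo and avoid rounding every object up to a power-of-two kmalloc bucket. A minimal sketch of the pattern, using hypothetical _obj_* names:

        static struct kmem_cache *_obj_cache;   /* hypothetical */
        static mempool_t *_obj_pool;            /* hypothetical */

        static int obj_pools_init(unsigned nr_objs)
        {
                /* The cache is named after the struct it holds. */
                _obj_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
                if (!_obj_cache)
                        return -ENOMEM;

                /* The pool keeps nr_objs objects preallocated from that cache. */
                _obj_pool = mempool_create_slab_pool(nr_objs, _obj_cache);
                if (!_obj_pool) {
                        kmem_cache_destroy(_obj_cache);
                        return -ENOMEM;
                }
                return 0;
        }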
@@ -194,10 +195,10 @@ static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
                       (lhs->block == rhs->block);
 }
 
-static struct cell *__search_bucket(struct hlist_head *bucket,
-                                   struct cell_key *key)
+static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
+                                                 struct cell_key *key)
 {
-       struct cell *cell;
+       struct dm_bio_prison_cell *cell;
        struct hlist_node *tmp;
 
        hlist_for_each_entry(cell, tmp, bucket, list)
@@ -214,12 +215,12 @@ static struct cell *__search_bucket(struct hlist_head *bucket,
  * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
  */
 static int bio_detain(struct bio_prison *prison, struct cell_key *key,
-                     struct bio *inmate, struct cell **ref)
+                     struct bio *inmate, struct dm_bio_prison_cell **ref)
 {
        int r = 1;
        unsigned long flags;
        uint32_t hash = hash_key(prison, key);
-       struct cell *cell, *cell2;
+       struct dm_bio_prison_cell *cell, *cell2;
 
        BUG_ON(hash > prison->nr_buckets);
 
@@ -273,19 +274,21 @@ out:
 /*
  * @inmates must have been initialised prior to this call
  */
-static void __cell_release(struct cell *cell, struct bio_list *inmates)
+static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
 {
        struct bio_prison *prison = cell->prison;
 
        hlist_del(&cell->list);
 
-       bio_list_add(inmates, cell->holder);
-       bio_list_merge(inmates, &cell->bios);
+       if (inmates) {
+               bio_list_add(inmates, cell->holder);
+               bio_list_merge(inmates, &cell->bios);
+       }
 
        mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release(struct cell *cell, struct bio_list *bios)
+static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
 {
        unsigned long flags;
        struct bio_prison *prison = cell->prison;
@@ -301,14 +304,15 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
  * bio may be in the cell.  This function releases the cell, and also does
  * a sanity check.
  */
-static void __cell_release_singleton(struct cell *cell, struct bio *bio)
+static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
-       hlist_del(&cell->list);
        BUG_ON(cell->holder != bio);
        BUG_ON(!bio_list_empty(&cell->bios));
+
+       __cell_release(cell, NULL);
 }
 
-static void cell_release_singleton(struct cell *cell, struct bio *bio)
+static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
        unsigned long flags;
        struct bio_prison *prison = cell->prison;
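
Beyond the rename, this hunk fixes a leak: the old __cell_release_singleton() unlinked the cell from its hash bucket but never handed it back to the mempool. Routing through __cell_release() with a NULL inmates list (tolerated as of the previous hunk) performs both the hlist_del() and the mempool_free().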
@@ -321,7 +325,8 @@ static void cell_release_singleton(struct cell *cell, struct bio *bio)
 /*
  * Sometimes we don't want the holder, just the additional bios.
  */
-static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
+                                    struct bio_list *inmates)
 {
        struct bio_prison *prison = cell->prison;
 
@@ -331,7 +336,8 @@ static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates
        mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
+                                  struct bio_list *inmates)
 {
        unsigned long flags;
        struct bio_prison *prison = cell->prison;
@@ -341,7 +347,7 @@ static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
        spin_unlock_irqrestore(&prison->lock, flags);
 }
 
-static void cell_error(struct cell *cell)
+static void cell_error(struct dm_bio_prison_cell *cell)
 {
        struct bio_prison *prison = cell->prison;
        struct bio_list bios;
@@ -488,7 +494,7 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
  * also provides the interface for creating and destroying internal
  * devices.
  */
-struct new_mapping;
+struct dm_thin_new_mapping;
 
 struct pool_features {
        unsigned zero_new_blocks:1;
@@ -534,7 +540,7 @@ struct pool {
        struct deferred_set shared_read_ds;
        struct deferred_set all_io_ds;
 
-       struct new_mapping *next_mapping;
+       struct dm_thin_new_mapping *next_mapping;
        mempool_t *mapping_pool;
        mempool_t *endio_hook_pool;
 };
@@ -627,11 +633,11 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
 
 /*----------------------------------------------------------------*/
 
-struct endio_hook {
+struct dm_thin_endio_hook {
        struct thin_c *tc;
        struct deferred_entry *shared_read_entry;
        struct deferred_entry *all_io_entry;
-       struct new_mapping *overwrite_mapping;
+       struct dm_thin_new_mapping *overwrite_mapping;
 };
 
 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
@@ -644,7 +650,8 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
        bio_list_init(master);
 
        while ((bio = bio_list_pop(&bios))) {
-               struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
                if (h->tc == tc)
                        bio_endio(bio, DM_ENDIO_REQUEUE);
                else
@@ -733,7 +740,7 @@ static void wake_worker(struct pool *pool)
 /*
  * Bio endio functions.
  */
-struct new_mapping {
+struct dm_thin_new_mapping {
        struct list_head list;
 
        unsigned quiesced:1;
@@ -743,7 +750,7 @@ struct new_mapping {
        struct thin_c *tc;
        dm_block_t virt_block;
        dm_block_t data_block;
-       struct cell *cell, *cell2;
+       struct dm_bio_prison_cell *cell, *cell2;
        int err;
 
        /*
@@ -756,7 +763,7 @@ struct new_mapping {
        bio_end_io_t *saved_bi_end_io;
 };
 
-static void __maybe_add_mapping(struct new_mapping *m)
+static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
 {
        struct pool *pool = m->tc->pool;
 
@@ -769,7 +776,7 @@ static void __maybe_add_mapping(struct new_mapping *m)
 static void copy_complete(int read_err, unsigned long write_err, void *context)
 {
        unsigned long flags;
-       struct new_mapping *m = context;
+       struct dm_thin_new_mapping *m = context;
        struct pool *pool = m->tc->pool;
 
        m->err = read_err || write_err ? -EIO : 0;
@@ -783,8 +790,8 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 static void overwrite_endio(struct bio *bio, int err)
 {
        unsigned long flags;
-       struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
-       struct new_mapping *m = h->overwrite_mapping;
+       struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+       struct dm_thin_new_mapping *m = h->overwrite_mapping;
        struct pool *pool = m->tc->pool;
 
        m->err = err;
@@ -808,7 +815,7 @@ static void overwrite_endio(struct bio *bio, int err)
 /*
  * This sends the bios in the cell back to the deferred_bios list.
  */
-static void cell_defer(struct thin_c *tc, struct cell *cell,
+static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
                       dm_block_t data_block)
 {
        struct pool *pool = tc->pool;
@@ -825,7 +832,7 @@ static void cell_defer(struct thin_c *tc, struct cell *cell,
  * Same as cell_defer above, except it omits one particular detainee,
  * a write bio that covers the block and has already been processed.
  */
-static void cell_defer_except(struct thin_c *tc, struct cell *cell)
+static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
        struct bio_list bios;
        struct pool *pool = tc->pool;
@@ -840,7 +847,7 @@ static void cell_defer_except(struct thin_c *tc, struct cell *cell)
        wake_worker(pool);
 }
 
-static void process_prepared_mapping(struct new_mapping *m)
+static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 {
        struct thin_c *tc = m->tc;
        struct bio *bio;
@@ -883,7 +890,7 @@ static void process_prepared_mapping(struct new_mapping *m)
        mempool_free(m, tc->pool->mapping_pool);
 }
 
-static void process_prepared_discard(struct new_mapping *m)
+static void process_prepared_discard(struct dm_thin_new_mapping *m)
 {
        int r;
        struct thin_c *tc = m->tc;
@@ -906,11 +913,11 @@ static void process_prepared_discard(struct new_mapping *m)
 }
 
 static void process_prepared(struct pool *pool, struct list_head *head,
-                            void (*fn)(struct new_mapping *))
+                            void (*fn)(struct dm_thin_new_mapping *))
 {
        unsigned long flags;
        struct list_head maps;
-       struct new_mapping *m, *tmp;
+       struct dm_thin_new_mapping *m, *tmp;
 
        INIT_LIST_HEAD(&maps);
        spin_lock_irqsave(&pool->lock, flags);
@@ -954,9 +961,9 @@ static int ensure_next_mapping(struct pool *pool)
        return pool->next_mapping ? 0 : -ENOMEM;
 }
 
-static struct new_mapping *get_next_mapping(struct pool *pool)
+static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
 {
-       struct new_mapping *r = pool->next_mapping;
+       struct dm_thin_new_mapping *r = pool->next_mapping;
 
        BUG_ON(!pool->next_mapping);
 
@@ -968,11 +975,11 @@ static struct new_mapping *get_next_mapping(struct pool *pool)
 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
                          struct dm_dev *origin, dm_block_t data_origin,
                          dm_block_t data_dest,
-                         struct cell *cell, struct bio *bio)
+                         struct dm_bio_prison_cell *cell, struct bio *bio)
 {
        int r;
        struct pool *pool = tc->pool;
-       struct new_mapping *m = get_next_mapping(pool);
+       struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
        INIT_LIST_HEAD(&m->list);
        m->quiesced = 0;
@@ -994,7 +1001,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
         * bio immediately. Otherwise we use kcopyd to clone the data first.
         */
        if (io_overwrites_block(pool, bio)) {
-               struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
                h->overwrite_mapping = m;
                m->bio = bio;
                save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
@@ -1022,7 +1030,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 
 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
                                   dm_block_t data_origin, dm_block_t data_dest,
-                                  struct cell *cell, struct bio *bio)
+                                  struct dm_bio_prison_cell *cell, struct bio *bio)
 {
        schedule_copy(tc, virt_block, tc->pool_dev,
                      data_origin, data_dest, cell, bio);
@@ -1030,18 +1038,18 @@ static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
 
 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
                                   dm_block_t data_dest,
-                                  struct cell *cell, struct bio *bio)
+                                  struct dm_bio_prison_cell *cell, struct bio *bio)
 {
        schedule_copy(tc, virt_block, tc->origin_dev,
                      virt_block, data_dest, cell, bio);
 }
 
 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
-                         dm_block_t data_block, struct cell *cell,
+                         dm_block_t data_block, struct dm_bio_prison_cell *cell,
                          struct bio *bio)
 {
        struct pool *pool = tc->pool;
-       struct new_mapping *m = get_next_mapping(pool);
+       struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
        INIT_LIST_HEAD(&m->list);
        m->quiesced = 1;
@@ -1062,12 +1070,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
                process_prepared_mapping(m);
 
        else if (io_overwrites_block(pool, bio)) {
-               struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
                h->overwrite_mapping = m;
                m->bio = bio;
                save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
                remap_and_issue(tc, bio, data_block);
-
        } else {
                int r;
                struct dm_io_region to;
@@ -1152,7 +1160,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
  */
 static void retry_on_resume(struct bio *bio)
 {
-       struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+       struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
        struct thin_c *tc = h->tc;
        struct pool *pool = tc->pool;
        unsigned long flags;
@@ -1162,7 +1170,7 @@ static void retry_on_resume(struct bio *bio)
        spin_unlock_irqrestore(&pool->lock, flags);
 }
 
-static void no_space(struct cell *cell)
+static void no_space(struct dm_bio_prison_cell *cell)
 {
        struct bio *bio;
        struct bio_list bios;
@@ -1177,12 +1185,13 @@ static void no_space(struct cell *cell)
 static void process_discard(struct thin_c *tc, struct bio *bio)
 {
        int r;
+       unsigned long flags;
        struct pool *pool = tc->pool;
-       struct cell *cell, *cell2;
+       struct dm_bio_prison_cell *cell, *cell2;
        struct cell_key key, key2;
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_thin_lookup_result lookup_result;
-       struct new_mapping *m;
+       struct dm_thin_new_mapping *m;
 
        build_virtual_key(tc->td, block, &key);
        if (bio_detain(tc->pool->prison, &key, bio, &cell))
@@ -1218,7 +1227,9 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
                        m->bio = bio;
 
                        if (!ds_add_work(&pool->all_io_ds, &m->list)) {
+                               spin_lock_irqsave(&pool->lock, flags);
                                list_add(&m->list, &pool->prepared_discards);
+                               spin_unlock_irqrestore(&pool->lock, flags);
                                wake_worker(pool);
                        }
                } else {
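
pool->prepared_discards is drained by the worker in process_prepared(), which splices the list while holding pool->lock; appending to it without that lock raced against the splice. This hunk (and the matching one in thin_endio() near the end of the patch) makes producer and consumer agree on the lock. The pairing, roughly:

        /* producer */
        spin_lock_irqsave(&pool->lock, flags);
        list_add(&m->list, &pool->prepared_discards);
        spin_unlock_irqrestore(&pool->lock, flags);

        /* consumer, in process_prepared() (abbreviated) */
        spin_lock_irqsave(&pool->lock, flags);
        list_splice_init(head, &maps);
        spin_unlock_irqrestore(&pool->lock, flags);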
@@ -1257,7 +1268,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
                          struct cell_key *key,
                          struct dm_thin_lookup_result *lookup_result,
-                         struct cell *cell)
+                         struct dm_bio_prison_cell *cell)
 {
        int r;
        dm_block_t data_block;
@@ -1284,7 +1295,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
                               dm_block_t block,
                               struct dm_thin_lookup_result *lookup_result)
 {
-       struct cell *cell;
+       struct dm_bio_prison_cell *cell;
        struct pool *pool = tc->pool;
        struct cell_key key;
 
@@ -1299,7 +1310,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
        if (bio_data_dir(bio) == WRITE)
                break_sharing(tc, bio, block, &key, lookup_result, cell);
        else {
-               struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 
                h->shared_read_entry = ds_inc(&pool->shared_read_ds);
 
@@ -1309,7 +1320,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 }
 
 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
-                           struct cell *cell)
+                           struct dm_bio_prison_cell *cell)
 {
        int r;
        dm_block_t data_block;
@@ -1357,7 +1368,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 {
        int r;
        dm_block_t block = get_bio_block(tc, bio);
-       struct cell *cell;
+       struct dm_bio_prison_cell *cell;
        struct cell_key key;
        struct dm_thin_lookup_result lookup_result;
 
@@ -1426,7 +1437,7 @@ static void process_deferred_bios(struct pool *pool)
        spin_unlock_irqrestore(&pool->lock, flags);
 
        while ((bio = bio_list_pop(&bios))) {
-               struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
                struct thin_c *tc = h->tc;
 
                /*
@@ -1516,10 +1527,10 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
        wake_worker(pool);
 }
 
-static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
 {
        struct pool *pool = tc->pool;
-       struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+       struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
 
        h->tc = tc;
        h->shared_read_entry = NULL;
@@ -1626,6 +1637,21 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
        pool->low_water_blocks = pt->low_water_blocks;
        pool->pf = pt->pf;
 
+       /*
+        * If discard_passdown was enabled verify that the data device
+        * supports discards.  Disable discard_passdown if not; otherwise
+        * -EOPNOTSUPP will be returned.
+        */
+       if (pt->pf.discard_passdown) {
+               struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
+               if (!q || !blk_queue_discard(q)) {
+                       char buf[BDEVNAME_SIZE];
+                       DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
+                              bdevname(pt->data_dev->bdev, buf));
+                       pool->pf.discard_passdown = 0;
+               }
+       }
+
        return 0;
 }
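
Moving this check out of pool_ctr() (see the deletion further down) means it runs whenever the target is bound to the pool, not only at first construction, so a table reload onto a data device without discard support is caught as well; the warning also gains the offending device's name. Note that only the runtime copy (pool->pf) is cleared; the flags the user asked for stay in pt->pf, which the status changes below rely on.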
 
@@ -1666,6 +1692,9 @@ static void __pool_destroy(struct pool *pool)
        kfree(pool);
 }
 
+static struct kmem_cache *_new_mapping_cache;
+static struct kmem_cache *_endio_hook_cache;
+
 static struct pool *pool_create(struct mapped_device *pool_md,
                                struct block_device *metadata_dev,
                                unsigned long block_size, char **error)
@@ -1734,16 +1763,16 @@ static struct pool *pool_create(struct mapped_device *pool_md,
        ds_init(&pool->all_io_ds);
 
        pool->next_mapping = NULL;
-       pool->mapping_pool =
-               mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping));
+       pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
+                                                     _new_mapping_cache);
        if (!pool->mapping_pool) {
                *error = "Error creating pool's mapping mempool";
                err_p = ERR_PTR(-ENOMEM);
                goto bad_mapping_pool;
        }
 
-       pool->endio_hook_pool =
-               mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook));
+       pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
+                                                        _endio_hook_cache);
        if (!pool->endio_hook_pool) {
                *error = "Error creating pool's endio_hook mempool";
                err_p = ERR_PTR(-ENOMEM);
@@ -1982,19 +2011,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
                goto out_flags_changed;
        }
 
-       /*
-        * If discard_passdown was enabled verify that the data device
-        * supports discards.  Disable discard_passdown if not; otherwise
-        * -EOPNOTSUPP will be returned.
-        */
-       if (pf.discard_passdown) {
-               struct request_queue *q = bdev_get_queue(data_dev->bdev);
-               if (!q || !blk_queue_discard(q)) {
-                       DMWARN("Discard unsupported by data device: Disabling discard passdown.");
-                       pf.discard_passdown = 0;
-               }
-       }
-
        pt->pool = pool;
        pt->ti = ti;
        pt->metadata_dev = metadata_dev;
@@ -2268,6 +2284,36 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
        return 0;
 }
 
+static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+       int r;
+
+       r = check_arg_count(argc, 1);
+       if (r)
+               return r;
+
+       r = dm_pool_reserve_metadata_snap(pool->pmd);
+       if (r)
+               DMWARN("reserve_metadata_snap message failed.");
+
+       return r;
+}
+
+static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+       int r;
+
+       r = check_arg_count(argc, 1);
+       if (r)
+               return r;
+
+       r = dm_pool_release_metadata_snap(pool->pmd);
+       if (r)
+               DMWARN("release_metadata_snap message failed.");
+
+       return r;
+}
+
 /*
  * Messages supported:
  *   create_thin       <dev_id>
@@ -2275,6 +2321,8 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
  *   delete            <dev_id>
  *   trim              <dev_id> <new_size_in_sectors>
  *   set_transaction_id <current_trans_id> <new_trans_id>
+ *   reserve_metadata_snap
+ *   release_metadata_snap
  */
 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
 {
@@ -2294,6 +2342,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
        else if (!strcasecmp(argv[0], "set_transaction_id"))
                r = process_set_transaction_id_mesg(argc, argv, pool);
 
+       else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
+               r = process_reserve_metadata_snap_mesg(argc, argv, pool);
+
+       else if (!strcasecmp(argv[0], "release_metadata_snap"))
+               r = process_release_metadata_snap_mesg(argc, argv, pool);
+
        else
                DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
 
@@ -2353,7 +2407,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
                if (r)
                        return r;
 
-               r = dm_pool_get_held_metadata_root(pool->pmd, &held_root);
+               r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
                if (r)
                        return r;
 
@@ -2379,7 +2433,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
                       (unsigned long long)pt->low_water_blocks);
 
                count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled +
-                       !pool->pf.discard_passdown;
+                       !pt->pf.discard_passdown;
                DMEMIT("%u ", count);
 
                if (!pool->pf.zero_new_blocks)
@@ -2388,7 +2442,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
                if (!pool->pf.discard_enabled)
                        DMEMIT("ignore_discard ");
 
-               if (!pool->pf.discard_passdown)
+               if (!pt->pf.discard_passdown)
                        DMEMIT("no_discard_passdown ");
 
                break;
@@ -2449,7 +2503,7 @@ static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
-       .version = {1, 1, 0},
+       .version = {1, 2, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
@@ -2605,9 +2659,9 @@ static int thin_endio(struct dm_target *ti,
                      union map_info *map_context)
 {
        unsigned long flags;
-       struct endio_hook *h = map_context->ptr;
+       struct dm_thin_endio_hook *h = map_context->ptr;
        struct list_head work;
-       struct new_mapping *m, *tmp;
+       struct dm_thin_new_mapping *m, *tmp;
        struct pool *pool = h->tc->pool;
 
        if (h->shared_read_entry) {
@@ -2626,8 +2680,10 @@ static int thin_endio(struct dm_target *ti,
        if (h->all_io_entry) {
                INIT_LIST_HEAD(&work);
                ds_dec(h->all_io_entry, &work);
+               spin_lock_irqsave(&pool->lock, flags);
                list_for_each_entry_safe(m, tmp, &work, list)
                        list_add(&m->list, &pool->prepared_discards);
+               spin_unlock_irqrestore(&pool->lock, flags);
        }
 
        mempool_free(h, pool->endio_hook_pool);
@@ -2745,7 +2801,32 @@ static int __init dm_thin_init(void)
 
        r = dm_register_target(&pool_target);
        if (r)
-               dm_unregister_target(&thin_target);
+               goto bad_pool_target;
+
+       r = -ENOMEM;
+
+       _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
+       if (!_cell_cache)
+               goto bad_cell_cache;
+
+       _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+       if (!_new_mapping_cache)
+               goto bad_new_mapping_cache;
+
+       _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
+       if (!_endio_hook_cache)
+               goto bad_endio_hook_cache;
+
+       return 0;
+
+bad_endio_hook_cache:
+       kmem_cache_destroy(_new_mapping_cache);
+bad_new_mapping_cache:
+       kmem_cache_destroy(_cell_cache);
+bad_cell_cache:
+       dm_unregister_target(&pool_target);
+bad_pool_target:
+       dm_unregister_target(&thin_target);
 
        return r;
 }
@@ -2754,11 +2835,15 @@ static void dm_thin_exit(void)
 {
        dm_unregister_target(&thin_target);
        dm_unregister_target(&pool_target);
+
+       kmem_cache_destroy(_cell_cache);
+       kmem_cache_destroy(_new_mapping_cache);
+       kmem_cache_destroy(_endio_hook_cache);
 }
 
 module_init(dm_thin_init);
 module_exit(dm_thin_exit);
 
-MODULE_DESCRIPTION(DM_NAME "device-mapper thin provisioning target");
+MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");