struct work_struct work;
atomic_t pending;
int error;
+ int post_process;
};
/*
* pool for per bio private data and
* for encryption buffer pages
*/
mempool_t *io_pool;
mempool_t *page_pool;
+ struct bio_set *bs;
/*
* crypto related data
u8 key[0];
};
-#define MIN_IOS 256
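+/* reserved pool sizes: mempools only need to guarantee forward progress */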
+#define MIN_IOS 16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES 8
return r;
}
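+ /*
+ * bio_put() on a bio allocated from cc->bs must return it to that
+ * bioset, so this destructor is installed on every bio we take
+ * from the pool.
+ */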
+ static void dm_crypt_bio_destructor(struct bio *bio)
+ {
+ struct crypt_io *io = bio->bi_private;
+ struct crypt_config *cc = io->target->private;
+
+ bio_free(bio, cc->bs);
+ }
+
/*
* Generate a new unfragmented bio with the given size
* This should never violate the device limitations
gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
unsigned int i;
- /*
- * Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and
- * to fail earlier. This is not necessary but increases throughput.
- * FIXME: Is this really intelligent?
- */
- if (base_bio)
- clone = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
- else
- clone = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
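+ /*
+ * bio_clone() and bio_alloc() take their bios from the shared
+ * fs_bio_set; allocating from our private bioset instead avoids
+ * tangling with other fs_bio_set users under memory pressure.
+ * __GFP_NOMEMALLOC is dropped because mempool_alloc() already
+ * sets it internally.
+ */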
+ if (base_bio) {
+ clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
+ __bio_clone(clone, base_bio);
+ } else
+ clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
+
if (!clone)
return NULL;
+ clone->bi_destructor = dm_crypt_bio_destructor;
+
/* if the last bio was not complete, continue where that one ended */
clone->bi_idx = *bio_vec_idx;
clone->bi_vcnt = *bio_vec_idx;
* kcryptd:
*
* Needed because it would be very unwise to do decryption in an
- * interrupt context, so bios returning from read requests get
- * queued here.
+ * interrupt context, so both request processing and post-read
+ * decryption are pushed out to this workqueue.
*/
static struct workqueue_struct *_kcryptd_workqueue;
static void kcryptd_do_work(void *data);
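+ /*
+ * For reference, queueing an io boils down to (a sketch, assuming
+ * the pre-2.6.20 three-argument INIT_WORK() API that matches the
+ * kcryptd_do_work() prototype above):
+ *
+ *     INIT_WORK(&io->work, kcryptd_do_work, io);
+ *     queue_work(_kcryptd_workqueue, &io->work);
+ */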
if (!read_io)
crypt_free_buffer_pages(cc, clone, done);
+ /* keep going - not finished yet */
if (unlikely(clone->bi_size))
return 1;
- /*
- * successful reads are decrypted by the worker thread
- */
if (!read_io)
goto out;
}
bio_put(clone);
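+ /*
+ * Successful read: the decryption itself must not run here (we may
+ * be in interrupt context), so flag the io for post-processing and
+ * hand it to kcryptd.
+ */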
+ io->post_process = 1;
kcryptd_queue_io(io);
return 0;
clone->bi_rw = io->base_bio->bi_rw;
}
-static int process_read(struct crypt_io *io)
+static void process_read(struct crypt_io *io)
{
struct crypt_config *cc = io->target->private;
struct bio *base_bio = io->base_bio;
* copy the required bvecs because we need the original
* one in order to decrypt the whole bio data *afterwards*.
*/
- clone = bio_alloc(GFP_NOIO, bio_segments(base_bio));
+ clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
if (unlikely(!clone)) {
dec_pending(io, -ENOMEM);
- return 0;
+ return;
}
clone_init(io, clone);
+ clone->bi_destructor = dm_crypt_bio_destructor;
clone->bi_idx = 0;
clone->bi_vcnt = bio_segments(base_bio);
clone->bi_size = base_bio->bi_size;
sizeof(struct bio_vec) * clone->bi_vcnt);
generic_make_request(clone);
-
- return 0;
}
-static int process_write(struct crypt_io *io)
+static void process_write(struct crypt_io *io)
{
struct crypt_config *cc = io->target->private;
struct bio *base_bio = io->base_bio;
while (remaining) {
clone = crypt_alloc_buffer(cc, base_bio->bi_size,
io->first_clone, &bvec_idx);
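+ /*
+ * Errors are reported through dec_pending() now; process_write()
+ * returns void and the caller never sees them directly.
+ */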
- if (unlikely(!clone))
- goto cleanup;
+ if (unlikely(!clone)) {
+ dec_pending(io, -ENOMEM);
+ return;
+ }
ctx.bio_out = clone;
if (unlikely(crypt_convert(cc, &ctx) < 0)) {
crypt_free_buffer_pages(cc, clone, clone->bi_size);
bio_put(clone);
- goto cleanup;
+ dec_pending(io, -EIO);
+ return;
}
clone_init(io, clone);
io->first_clone = clone;
}
- atomic_inc(&io->pending);
-
remaining -= clone->bi_size;
sector += bio_sectors(clone);
+ /* prevent bio_put of first_clone */
+ if (remaining)
+ atomic_inc(&io->pending);
+
generic_make_request(clone);
/* out of memory -> run queues */
if (remaining)
blk_congestion_wait(bio_data_dir(clone), HZ/100);
}
-
- /* drop reference, clones could have returned before we reach this */
- dec_pending(io, 0);
- return 0;
-
-cleanup:
- if (io->first_clone) {
- dec_pending(io, -ENOMEM);
- return 0;
- }
-
- /* if no bio has been dispatched yet, we can directly return the error */
- mempool_free(io, cc->io_pool);
- return -ENOMEM;
}
static void process_read_endio(struct crypt_io *io)
{
static void kcryptd_do_work(void *data)
{
struct crypt_io *io = data;
- process_read_endio(io);
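+ /*
+ * post_process means the read has already completed and now needs
+ * decrypting; otherwise this is a freshly mapped request that
+ * still has to be submitted.
+ */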
+ if (io->post_process)
+ process_read_endio(io);
+ else if (bio_data_dir(io->base_bio) == READ)
+ process_read(io);
+ else
+ process_write(io);
}
/*
goto bad4;
}
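+
+ /* bios for clones come from a private pool, not fs_bio_set */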
+ cc->bs = bioset_create(MIN_IOS, MIN_IOS, 4);
+ if (!cc->bs) {
+ ti->error = "Cannot allocate crypt bioset";
+ goto bad_bs;
+ }
+
if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
ti->error = "Error setting key";
goto bad5;
return 0;
bad5:
+ bioset_free(cc->bs);
+bad_bs:
mempool_destroy(cc->page_pool);
bad4:
mempool_destroy(cc->io_pool);
{
struct crypt_config *cc = (struct crypt_config *) ti->private;
+ bioset_free(cc->bs);
mempool_destroy(cc->page_pool);
mempool_destroy(cc->io_pool);
struct crypt_io *io;
io = mempool_alloc(cc->io_pool, GFP_NOIO);
-
io->target = ti;
io->base_bio = bio;
io->first_clone = NULL;
- io->error = 0;
+ io->error = io->post_process = 0;
atomic_set(&io->pending, 0);
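+ /* defer everything to kcryptd: no crypto or I/O from map context */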
+ kcryptd_queue_io(io);
- if (bio_data_dir(bio) == WRITE)
- return process_write(io);
-
- return process_read(io);
+ return 0;
}
static int crypt_status(struct dm_target *ti, status_type_t type,
static struct target_type crypt_target = {
.name = "crypt",
- .version= {1, 2, 0},
+ .version= {1, 3, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,