
Commit a4ffc0a0 authored by Linus Torvalds
* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm: (44 commits)
  dm raid1: report fault status
  dm raid1: handle read failures
  dm raid1: fix EIO after log failure
  dm raid1: handle recovery failures
  dm raid1: handle write failures
  dm snapshot: combine consecutive exceptions in memory
  dm: stripe enhanced status return
  dm: stripe trigger event on failure
  dm log: auto load modules
  dm: move deferred bio flushing to workqueue
  dm crypt: use async crypto
  dm crypt: prepare async callback fn
  dm crypt: add completion for async
  dm crypt: add async request mempool
  dm crypt: extract scatterlist processing
  dm crypt: tidy io ref counting
  dm crypt: introduce crypt_write_io_loop
  dm crypt: abstract crypt_write_done
  dm crypt: store sector mapping in dm_crypt_io
  dm crypt: move queue functions
  ...
parents d7511ec8 af195ac8
drivers/md/Kconfig  +12 −12
@@ -204,7 +204,7 @@ config BLK_DEV_DM

config DM_DEBUG
	boolean "Device mapper debugging support"
-	depends on BLK_DEV_DM && EXPERIMENTAL
+	depends on BLK_DEV_DM
	---help---
	  Enable this for messages that may help debug device-mapper problems.

@@ -212,7 +212,7 @@ config DM_DEBUG

config DM_CRYPT
	tristate "Crypt target support"
-	depends on BLK_DEV_DM && EXPERIMENTAL
+	depends on BLK_DEV_DM
	select CRYPTO
	select CRYPTO_CBC
	---help---
@@ -230,34 +230,34 @@ config DM_CRYPT
	  If unsure, say N.

config DM_SNAPSHOT
       tristate "Snapshot target (EXPERIMENTAL)"
       depends on BLK_DEV_DM && EXPERIMENTAL
       tristate "Snapshot target"
       depends on BLK_DEV_DM
       ---help---
         Allow volume managers to take writable snapshots of a device.

config DM_MIRROR
       tristate "Mirror target (EXPERIMENTAL)"
       depends on BLK_DEV_DM && EXPERIMENTAL
       tristate "Mirror target"
       depends on BLK_DEV_DM
       ---help---
         Allow volume managers to mirror logical volumes, also
         needed for live data migration tools such as 'pvmove'.

config DM_ZERO
	tristate "Zero target (EXPERIMENTAL)"
	depends on BLK_DEV_DM && EXPERIMENTAL
	tristate "Zero target"
	depends on BLK_DEV_DM
	---help---
	  A target that discards writes, and returns all zeroes for
	  reads.  Useful in some recovery situations.

config DM_MULTIPATH
	tristate "Multipath target (EXPERIMENTAL)"
	depends on BLK_DEV_DM && EXPERIMENTAL
	tristate "Multipath target"
	depends on BLK_DEV_DM
	---help---
	  Allow volume managers to support multipath hardware.

config DM_MULTIPATH_EMC
	tristate "EMC CX/AX multipath support (EXPERIMENTAL)"
	depends on DM_MULTIPATH && BLK_DEV_DM && EXPERIMENTAL
	tristate "EMC CX/AX multipath support"
	depends on DM_MULTIPATH && BLK_DEV_DM
	---help---
	  Multipath support for EMC CX/AX series hardware.

drivers/md/dm-crypt.c  +320 −166
/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
- * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006-2007 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

+#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -27,21 +28,11 @@
#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

-/*
- * per bio private data
- */
-struct dm_crypt_io {
-	struct dm_target *target;
-	struct bio *base_bio;
-	struct work_struct work;
-	atomic_t pending;
-	int error;
-};
-
/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
+	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
@@ -49,7 +40,27 @@ struct convert_context {
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
-	int write;
+	atomic_t pending;
};

+/*
+ * per bio private data
+ */
+struct dm_crypt_io {
+	struct dm_target *target;
+	struct bio *base_bio;
+	struct work_struct work;
+
+	struct convert_context ctx;
+
+	atomic_t pending;
+	int error;
+	sector_t sector;
+};
+
+struct dm_crypt_request {
+	struct scatterlist sg_in;
+	struct scatterlist sg_out;
+};

struct crypt_config;
@@ -72,10 +83,11 @@ struct crypt_config {
	sector_t start;

	/*
-	 * pool for per bio private data and
-	 * for encryption buffer pages
+	 * pool for per bio private data, crypto requests and
+	 * encryption requeusts/buffer pages
	 */
	mempool_t *io_pool;
+	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

@@ -93,9 +105,25 @@ struct crypt_config {
	sector_t iv_offset;
	unsigned int iv_size;

+	/*
+	 * Layout of each crypto request:
+	 *
+	 *   struct ablkcipher_request
+	 *      context
+	 *      padding
+	 *   struct dm_crypt_request
+	 *      padding
+	 *   IV
+	 *
+	 * The padding is added so that dm_crypt_request and the IV are
+	 * correctly aligned.
+	 */
+	unsigned int dmreq_start;
+	struct ablkcipher_request *req;
+
	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
-	struct crypto_blkcipher *tfm;
+	struct crypto_ablkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
@@ -108,6 +136,7 @@ struct crypt_config {
static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
+static void kcryptd_queue_crypt(struct dm_crypt_io *io);

/*
 * Different IV generation algorithms:
@@ -188,7 +217,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
		return PTR_ERR(essiv_tfm);
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
-	    crypto_blkcipher_ivsize(cc->tfm)) {
+	    crypto_ablkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
@@ -225,7 +254,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
-	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
+	unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
@@ -289,42 +318,10 @@ static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

-static int
-crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
-                          struct scatterlist *in, unsigned int length,
-                          int write, sector_t sector)
-{
-	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
-	struct blkcipher_desc desc = {
-		.tfm = cc->tfm,
-		.info = iv,
-		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
-	};
-	int r;
-
-	if (cc->iv_gen_ops) {
-		r = cc->iv_gen_ops->generator(cc, iv, sector);
-		if (r < 0)
-			return r;
-
-		if (write)
-			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
-		else
-			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
-	} else {
-		if (write)
-			r = crypto_blkcipher_encrypt(&desc, out, in, length);
-		else
-			r = crypto_blkcipher_decrypt(&desc, out, in, length);
-	}
-
-	return r;
-}
-
static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
-			       sector_t sector, int write)
+			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
@@ -333,49 +330,123 @@ static void crypt_convert_init(struct crypt_config *cc,
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
-	ctx->write = write;
+	init_completion(&ctx->restart);
+	/*
+	 * Crypto operation can be asynchronous,
+	 * ctx->pending is increased after request submission.
+	 * We need to ensure that we don't call the crypt finish
+	 * operation before pending got incremented
+	 * (dependent on crypt submission return code).
+	 */
+	atomic_set(&ctx->pending, 2);
 }

-/*
- * Encrypt / decrypt data from one bio to another one (can be the same one)
- */
-static int crypt_convert(struct crypt_config *cc,
-			 struct convert_context *ctx)
-{
-	int r = 0;
-
-	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
-	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
-		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
-		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
-		struct scatterlist sg_in, sg_out;
-
-		sg_init_table(&sg_in, 1);
-		sg_set_page(&sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, bv_in->bv_offset + ctx->offset_in);
-
-		sg_init_table(&sg_out, 1);
-		sg_set_page(&sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, bv_out->bv_offset + ctx->offset_out);
-
-		ctx->offset_in += sg_in.length;
-		if (ctx->offset_in >= bv_in->bv_len) {
-			ctx->offset_in = 0;
-			ctx->idx_in++;
-		}
-
-		ctx->offset_out += sg_out.length;
-		if (ctx->offset_out >= bv_out->bv_len) {
-			ctx->offset_out = 0;
-			ctx->idx_out++;
-		}
-
-		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
-					      ctx->write, ctx->sector);
-		if (r < 0)
-			break;
-
-		ctx->sector++;
-	}
-
-	return r;
-}
+static int crypt_convert_block(struct crypt_config *cc,
+			       struct convert_context *ctx,
+			       struct ablkcipher_request *req)
+{
+	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
+	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+	struct dm_crypt_request *dmreq;
+	u8 *iv;
+	int r = 0;
+
+	dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
+	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
+			 crypto_ablkcipher_alignmask(cc->tfm) + 1);
+
+	sg_init_table(&dmreq->sg_in, 1);
+	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
+		    bv_in->bv_offset + ctx->offset_in);
+
+	sg_init_table(&dmreq->sg_out, 1);
+	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
+		    bv_out->bv_offset + ctx->offset_out);
+
+	ctx->offset_in += 1 << SECTOR_SHIFT;
+	if (ctx->offset_in >= bv_in->bv_len) {
+		ctx->offset_in = 0;
+		ctx->idx_in++;
+	}
+
+	ctx->offset_out += 1 << SECTOR_SHIFT;
+	if (ctx->offset_out >= bv_out->bv_len) {
+		ctx->offset_out = 0;
+		ctx->idx_out++;
+	}
+
+	if (cc->iv_gen_ops) {
+		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
+		if (r < 0)
+			return r;
+	}
+
+	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
+				     1 << SECTOR_SHIFT, iv);
+
+	if (bio_data_dir(ctx->bio_in) == WRITE)
+		r = crypto_ablkcipher_encrypt(req);
+	else
+		r = crypto_ablkcipher_decrypt(req);
+
+	return r;
+}
+
+static void kcryptd_async_done(struct crypto_async_request *async_req,
+			       int error);
+static void crypt_alloc_req(struct crypt_config *cc,
+			    struct convert_context *ctx)
+{
+	if (!cc->req)
+		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+	ablkcipher_request_set_tfm(cc->req, cc->tfm);
+	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+					     CRYPTO_TFM_REQ_MAY_SLEEP,
+					     kcryptd_async_done, ctx);
+}
+
+/*
+ * Encrypt / decrypt data from one bio to another one (can be the same one)
+ */
+static int crypt_convert(struct crypt_config *cc,
+			 struct convert_context *ctx)
+{
+	int r = 0;
+
+	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
+	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
+
+		crypt_alloc_req(cc, ctx);
+
+		r = crypt_convert_block(cc, ctx, cc->req);
+
+		switch (r) {
+		case -EBUSY:
+			wait_for_completion(&ctx->restart);
+			INIT_COMPLETION(ctx->restart);
+			/* fall through*/
+		case -EINPROGRESS:
+			atomic_inc(&ctx->pending);
+			cc->req = NULL;
+			r = 0;
+			/* fall through*/
+		case 0:
+			ctx->sector++;
+			continue;
+		}
+
+		break;
+	}
+
+	/*
+	 * If there are pending crypto operation run async
+	 * code. Otherwise process return code synchronously.
+	 * The step of 2 ensures that async finish doesn't
+	 * call crypto finish too early.
+	 */
+	if (atomic_sub_return(2, &ctx->pending))
+		return -EINPROGRESS;
+
+	return r;
+}

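A note on the pending counter in the new crypt_convert() above: crypt_convert_init() primes ctx->pending with 2 rather than 1, so an async completion racing with the submission loop can never drop the count to zero early; only the submitter's final atomic_sub_return(2, ...) reveals whether requests are still in flight. Below is a toy model of the same arithmetic as plain userspace C (invented names, not kernel code, and no real concurrency):

/* Toy model of the ctx->pending protocol -- not kernel code.  Each
 * block whose request goes asynchronous adds 1 before the loop moves
 * on, and every async completion subtracts 1.  When the submitter
 * finally subtracts its bias of 2, a nonzero remainder means
 * completions are still outstanding (-EINPROGRESS); zero means the
 * submitter saw everything finish and owns the cleanup itself. */
#include <assert.h>
#include <stdio.h>

static int pending;
static int in_flight;		/* simulated outstanding async requests */

static void submit_block(int goes_async)
{
	if (goes_async) {
		pending++;	/* mirrors atomic_inc(&ctx->pending) */
		in_flight++;
	}
}

/* mirrors the atomic_dec_and_test() in kcryptd_async_done() */
static int complete_one(void)
{
	return --pending == 0;
}

int main(void)
{
	pending = 2;		/* the bias set in crypt_convert_init() */

	submit_block(1);	/* block 1 went async */
	submit_block(0);	/* block 2 finished synchronously */
	submit_block(1);	/* block 3 went async */

	pending -= 2;		/* mirrors atomic_sub_return(2, ...) */
	printf("after submission: pending=%d (%s)\n", pending,
	       pending ? "-EINPROGRESS" : "all done synchronously");

	while (in_flight--)
		if (complete_one())
			printf("last completion finishes the io\n");

	assert(pending == 0);
	return 0;
}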
@@ -455,18 +526,14 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
-static void crypt_dec_pending(struct dm_crypt_io *io, int error)
+static void crypt_dec_pending(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = (struct crypt_config *) io->target->private;
-
-	if (error < 0)
-		io->error = error;
+	struct crypt_config *cc = io->target->private;

	if (!atomic_dec_and_test(&io->pending))
		return;

	bio_endio(io->base_bio, io->error);

	mempool_free(io, cc->io_pool);
}

@@ -484,30 +551,11 @@ static void crypt_dec_pending(struct dm_crypt_io *io, int error)
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
-static void kcryptd_do_work(struct work_struct *work);
-static void kcryptd_do_crypt(struct work_struct *work);
-
-static void kcryptd_queue_io(struct dm_crypt_io *io)
-{
-	struct crypt_config *cc = io->target->private;
-
-	INIT_WORK(&io->work, kcryptd_do_work);
-	queue_work(cc->io_queue, &io->work);
-}
-
-static void kcryptd_queue_crypt(struct dm_crypt_io *io)
-{
-	struct crypt_config *cc = io->target->private;
-
-	INIT_WORK(&io->work, kcryptd_do_crypt);
-	queue_work(cc->crypt_queue, &io->work);
-}

static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
-	unsigned read_io = bio_data_dir(clone) == READ;
+	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;
@@ -515,21 +563,20 @@ static void crypt_endio(struct bio *clone, int error)
	/*
	 * free the processed pages
	 */
-	if (!read_io) {
+	if (rw == WRITE)
 		crypt_free_buffer_pages(cc, clone);
-		goto out;
-	}
-
-	if (unlikely(error))
-		goto out;

 	bio_put(clone);

-	kcryptd_queue_crypt(io);
-	return;
-
-out:
-	bio_put(clone);
-	crypt_dec_pending(io, error);
+	if (rw == READ && !error) {
+		kcryptd_queue_crypt(io);
+		return;
+	}
+
+	if (unlikely(error))
+		io->error = error;
+
+	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
@@ -543,12 +590,11 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
	clone->bi_destructor = dm_crypt_bio_destructor;
}

-static void process_read(struct dm_crypt_io *io)
+static void kcryptd_io_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
-	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);

@@ -559,7 +605,8 @@ static void process_read(struct dm_crypt_io *io)
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
-		crypt_dec_pending(io, -ENOMEM);
+		io->error = -ENOMEM;
+		crypt_dec_pending(io);
		return;
	}

@@ -567,25 +614,71 @@ static void process_read(struct dm_crypt_io *io)
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
-	clone->bi_sector = cc->start + sector;
+	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}

-static void process_write(struct dm_crypt_io *io)
+static void kcryptd_io_write(struct dm_crypt_io *io)
+{
+	struct bio *clone = io->ctx.bio_out;
+
+	generic_make_request(clone);
+}
+
+static void kcryptd_io(struct work_struct *work)
+{
+	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+
+	if (bio_data_dir(io->base_bio) == READ)
+		kcryptd_io_read(io);
+	else
+		kcryptd_io_write(io);
+}
+
+static void kcryptd_queue_io(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
-	struct bio *base_bio = io->base_bio;
-	struct bio *clone;
-	struct convert_context ctx;
-	unsigned remaining = base_bio->bi_size;
-	sector_t sector = base_bio->bi_sector - io->target->begin;

-	atomic_inc(&io->pending);
-
-	crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);
+	INIT_WORK(&io->work, kcryptd_io);
+	queue_work(cc->io_queue, &io->work);
+}
+
+static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
+					  int error, int async)
+{
+	struct bio *clone = io->ctx.bio_out;
+	struct crypt_config *cc = io->target->private;
+
+	if (unlikely(error < 0)) {
+		crypt_free_buffer_pages(cc, clone);
+		bio_put(clone);
+		io->error = -EIO;
+		return;
+	}
+
+	/* crypt_convert should have filled the clone bio */
+	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+
+	clone->bi_sector = cc->start + io->sector;
+	io->sector += bio_sectors(clone);
+
+	if (async)
+		kcryptd_queue_io(io);
+	else {
+		atomic_inc(&io->pending);
+		generic_make_request(clone);
+	}
+}
+
+static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
+{
+	struct crypt_config *cc = io->target->private;
+	struct bio *clone;
+	unsigned remaining = io->base_bio->bi_size;
+	int r;

	/*
	 * The allocated buffers can be smaller than the whole bio,
@@ -594,70 +687,110 @@ static void process_write(struct dm_crypt_io *io)
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining);
		if (unlikely(!clone)) {
-			crypt_dec_pending(io, -ENOMEM);
+			io->error = -ENOMEM;
			return;
		}

-		ctx.bio_out = clone;
-		ctx.idx_out = 0;
+		io->ctx.bio_out = clone;
+		io->ctx.idx_out = 0;

-		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
-			crypt_free_buffer_pages(cc, clone);
-			bio_put(clone);
-			crypt_dec_pending(io, -EIO);
-			return;
-		}
+		remaining -= clone->bi_size;

+		r = crypt_convert(cc, &io->ctx);

+		if (r != -EINPROGRESS) {
+			kcryptd_crypt_write_io_submit(io, r, 0);
+			if (unlikely(r < 0))
+				return;
+		} else
+			atomic_inc(&io->pending);

-		/* crypt_convert should have filled the clone bio */
-		BUG_ON(ctx.idx_out < clone->bi_vcnt);
+		/* out of memory -> run queues */
+		if (unlikely(remaining))
+			congestion_wait(WRITE, HZ/100);
	}
}

-		clone->bi_sector = cc->start + sector;
-		remaining -= clone->bi_size;
-		sector += bio_sectors(clone);
-
-		/* Grab another reference to the io struct
-		 * before we kick off the request */
-		if (remaining)
-			atomic_inc(&io->pending);
-
-		generic_make_request(clone);
-
-		/* Do not reference clone after this - it
-		 * may be gone already. */
-
-		/* out of memory -> run queues */
-		if (remaining)
-			congestion_wait(WRITE, HZ/100);
+static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+{
+	struct crypt_config *cc = io->target->private;
+
+	/*
+	 * Prevent io from disappearing until this function completes.
+	 */
+	atomic_inc(&io->pending);
+
+	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
+	kcryptd_crypt_write_convert_loop(io);
+
+	crypt_dec_pending(io);
}

+static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
+{
+	if (unlikely(error < 0))
+		io->error = -EIO;
+
+	crypt_dec_pending(io);
+}

-static void process_read_endio(struct dm_crypt_io *io)
+static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
-	struct convert_context ctx;
+	int r = 0;
+
+	atomic_inc(&io->pending);

-	crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
-			   io->base_bio->bi_sector - io->target->begin, 0);
+	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
+			   io->sector);

-	crypt_dec_pending(io, crypt_convert(cc, &ctx));
+	r = crypt_convert(cc, &io->ctx);
+
+	if (r != -EINPROGRESS)
+		kcryptd_crypt_read_done(io, r);
+
+	crypt_dec_pending(io);
}

-static void kcryptd_do_work(struct work_struct *work)
+static void kcryptd_async_done(struct crypto_async_request *async_req,
+			       int error)
 {
-	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+	struct convert_context *ctx = async_req->data;
+	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
+	struct crypt_config *cc = io->target->private;
+
+	if (error == -EINPROGRESS) {
+		complete(&ctx->restart);
+		return;
+	}
+
+	mempool_free(ablkcipher_request_cast(async_req), cc->req_pool);
+
+	if (!atomic_dec_and_test(&ctx->pending))
+		return;

 	if (bio_data_dir(io->base_bio) == READ)
-		process_read(io);
+		kcryptd_crypt_read_done(io, error);
+	else
+		kcryptd_crypt_write_io_submit(io, error, 1);
}

-static void kcryptd_do_crypt(struct work_struct *work)
+static void kcryptd_crypt(struct work_struct *work)
 {
 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

 	if (bio_data_dir(io->base_bio) == READ)
-		process_read_endio(io);
+		kcryptd_crypt_read_convert(io);
 	else
-		process_write(io);
+		kcryptd_crypt_write_convert(io);
}

+static void kcryptd_queue_crypt(struct dm_crypt_io *io)
+{
+	struct crypt_config *cc = io->target->private;
+
+	INIT_WORK(&io->work, kcryptd_crypt);
+	queue_work(cc->crypt_queue, &io->work);
+}

/*
@@ -733,7 +866,7 @@ static int crypt_wipe_key(struct crypt_config *cc)
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
-	struct crypto_blkcipher *tfm;
+	struct crypto_ablkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
@@ -787,7 +920,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
		goto bad_cipher;
	}

-	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
+	tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad_cipher;
@@ -821,7 +954,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad_ivmode;

-	cc->iv_size = crypto_blkcipher_ivsize(tfm);
+	cc->iv_size = crypto_ablkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
@@ -841,6 +974,20 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
		goto bad_slab_pool;
	}

+	cc->dmreq_start = sizeof(struct ablkcipher_request);
+	cc->dmreq_start += crypto_ablkcipher_reqsize(tfm);
+	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
+	cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) &
+			   ~(crypto_tfm_ctx_alignment() - 1);
+
+	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
+			sizeof(struct dm_crypt_request) + cc->iv_size);
+	if (!cc->req_pool) {
+		ti->error = "Cannot allocate crypt request mempool";
+		goto bad_req_pool;
+	}
+	cc->req = NULL;

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
@@ -853,7 +1000,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
		goto bad_bs;
	}

-	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
+	if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad_device;
	}
@@ -914,12 +1061,14 @@ bad_device:
bad_bs:
	mempool_destroy(cc->page_pool);
bad_page_pool:
+	mempool_destroy(cc->req_pool);
+bad_req_pool:
	mempool_destroy(cc->io_pool);
bad_slab_pool:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad_ivmode:
-	crypto_free_blkcipher(tfm);
+	crypto_free_ablkcipher(tfm);
bad_cipher:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
@@ -934,14 +1083,18 @@ static void crypt_dtr(struct dm_target *ti)
	destroy_workqueue(cc->io_queue);
	destroy_workqueue(cc->crypt_queue);

+	if (cc->req)
+		mempool_free(cc->req, cc->req_pool);
+
	bioset_free(cc->bs);
	mempool_destroy(cc->page_pool);
+	mempool_destroy(cc->req_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
-	crypto_free_blkcipher(cc->tfm);
+	crypto_free_ablkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
@@ -958,6 +1111,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
+	io->sector = bio->bi_sector - ti->begin;
	io->error = 0;
	atomic_set(&io->pending, 0);

drivers/md/dm-exception-store.c  +1 −1
@@ -449,7 +449,7 @@ static void persistent_destroy(struct exception_store *store)

static int persistent_read_metadata(struct exception_store *store)
{
-	int r, new_snapshot;
+	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
drivers/md/dm-ioctl.c  +23 −9
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/dm-ioctl.h>
#include <linux/hdreg.h>
+#include <linux/compat.h>

#include <asm/uaccess.h>

@@ -702,7 +703,7 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
	int r;
	char *new_name = (char *) param + param->data_start;

-	if (new_name < (char *) param->data ||
+	if (new_name < param->data ||
	    invalid_str(new_name, (void *) param + param_size)) {
		DMWARN("Invalid new logical volume name supplied.");
		return -EINVAL;
@@ -728,7 +729,7 @@ static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
	if (!md)
		return -ENXIO;

-	if (geostr < (char *) param->data ||
+	if (geostr < param->data ||
	    invalid_str(geostr, (void *) param + param_size)) {
		DMWARN("Invalid geometry supplied.");
		goto out;
@@ -1350,10 +1351,10 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
{
	struct dm_ioctl tmp, *dmi;

-	if (copy_from_user(&tmp, user, sizeof(tmp)))
+	if (copy_from_user(&tmp, user, sizeof(tmp) - sizeof(tmp.data)))
		return -EFAULT;

-	if (tmp.data_size < sizeof(tmp))
+	if (tmp.data_size < (sizeof(tmp) - sizeof(tmp.data)))
		return -EINVAL;

	dmi = vmalloc(tmp.data_size);
@@ -1397,13 +1398,11 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
	return 0;
}

-static int ctl_ioctl(struct inode *inode, struct file *file,
-		     uint command, ulong u)
+static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
{
	int r = 0;
	unsigned int cmd;
-	struct dm_ioctl *param;
-	struct dm_ioctl __user *user = (struct dm_ioctl __user *) u;
+	struct dm_ioctl *uninitialized_var(param);
	ioctl_fn fn = NULL;
	size_t param_size;

@@ -1471,8 +1470,23 @@ static int ctl_ioctl(struct inode *inode, struct file *file,
	return r;
}

+static long dm_ctl_ioctl(struct file *file, uint command, ulong u)
+{
+	return (long)ctl_ioctl(command, (struct dm_ioctl __user *)u);
+}
+
+#ifdef CONFIG_COMPAT
+static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u)
+{
+	return (long)dm_ctl_ioctl(file, command, (ulong) compat_ptr(u));
+}
+#else
+#define dm_compat_ctl_ioctl NULL
+#endif

static const struct file_operations _ctl_fops = {
-	.ioctl	 = ctl_ioctl,
+	.unlocked_ioctl	 = dm_ctl_ioctl,
+	.compat_ioctl = dm_compat_ctl_ioctl,
	.owner	 = THIS_MODULE,
};

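A note on the _ctl_fops change above: moving from .ioctl to .unlocked_ioctl drops the old BKL-protected entry point, and the new .compat_ioctl entry lets 32-bit userland drive a 64-bit kernel; since the dm_ioctl payload is handled the same either way, the compat path only has to widen the pointer with compat_ptr(). A userspace sketch of that funneling pattern (illustrative values, not kernel code):

/* Userspace sketch -- illustrative, not kernel code.  A 32-bit task's
 * pointer arrives zero-extended in an unsigned long; the compat entry
 * normalizes it and funnels into the same handler the native entry
 * uses. */
#include <stdio.h>

static long ctl_ioctl(unsigned int command, void *user)
{
	printf("cmd %#x, arg %p\n", command, user);
	return 0;		/* real handler would copy_from_user() here */
}

static long dm_ctl_ioctl(unsigned int command, unsigned long u)
{
	/* native entry: the ulong already holds a full user pointer */
	return ctl_ioctl(command, (void *)u);
}

static void *compat_ptr(unsigned long u)
{
	/* keep the low 32 bits, as the kernel's compat_ptr() does
	 * (some architectures also fix up an address-space bit) */
	return (void *)(u & 0xffffffffUL);
}

static long dm_compat_ctl_ioctl(unsigned int command, unsigned long u)
{
	return dm_ctl_ioctl(command, (unsigned long)compat_ptr(u));
}

int main(void)
{
	/* pretend a 32-bit process passed buffer address 0x1234beef */
	return (int)dm_compat_ctl_ioctl(0xc138fd00, 0x1234beefUL);
}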
drivers/md/dm-log.c  +50 −1
@@ -41,7 +41,7 @@ int dm_unregister_dirty_log_type(struct dirty_log_type *type)
	return 0;
}

-static struct dirty_log_type *get_type(const char *type_name)
+static struct dirty_log_type *_get_type(const char *type_name)
{
	struct dirty_log_type *type;

@@ -61,6 +61,55 @@ static struct dirty_log_type *get_type(const char *type_name)
	return NULL;
}

+/*
+ * get_type
+ * @type_name
+ *
+ * Attempt to retrieve the dirty_log_type by name.  If not already
+ * available, attempt to load the appropriate module.
+ *
+ * Log modules are named "dm-log-" followed by the 'type_name'.
+ * Modules may contain multiple types.
+ * This function will first try the module "dm-log-<type_name>",
+ * then truncate 'type_name' on the last '-' and try again.
+ *
+ * For example, if type_name was "clustered-disk", it would search
+ * 'dm-log-clustered-disk' then 'dm-log-clustered'.
+ *
+ * Returns: dirty_log_type* on success, NULL on failure
+ */
+static struct dirty_log_type *get_type(const char *type_name)
+{
+	char *p, *type_name_dup;
+	struct dirty_log_type *type;
+
+	type = _get_type(type_name);
+	if (type)
+		return type;
+
+	type_name_dup = kstrdup(type_name, GFP_KERNEL);
+	if (!type_name_dup) {
+		DMWARN("No memory left to attempt log module load for \"%s\"",
+		       type_name);
+		return NULL;
+	}
+
+	while (request_module("dm-log-%s", type_name_dup) ||
+	       !(type = _get_type(type_name))) {
+		p = strrchr(type_name_dup, '-');
+		if (!p)
+			break;
+		p[0] = '\0';
+	}
+
+	if (!type)
+		DMWARN("Module for logging type \"%s\" not found.", type_name);
+
+	kfree(type_name_dup);
+
+	return type;
+}

static void put_type(struct dirty_log_type *type)
{
	spin_lock(&_lock);
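
The fallback search in get_type() above deserves a closer look: request_module() is tried with the full "dm-log-<name>" first, and on failure the name is truncated at its last '-' and retried, so one module can register several log types. A userspace sketch of the same loop follows (the stub pretends only dm-log-clustered exists; note the real code also re-checks the registered types after each load attempt):

/* Userspace sketch of get_type()'s fallback search -- not kernel
 * code.  request_module_stub() stands in for request_module(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int request_module_stub(const char *name)
{
	char modname[64];

	snprintf(modname, sizeof(modname), "dm-log-%s", name);
	printf("trying %s\n", modname);
	return strcmp(modname, "dm-log-clustered") != 0;	/* 0 = loaded */
}

static int load_log_module(const char *type_name)
{
	char *p, *dup = strdup(type_name);
	int r;

	if (!dup)
		return -1;

	while ((r = request_module_stub(dup))) {
		p = strrchr(dup, '-');
		if (!p)
			break;		/* nothing left to truncate */
		*p = '\0';		/* drop the last '-' component, retry */
	}

	free(dup);
	return r;
}

int main(void)
{
	/* walks dm-log-clustered-disk, then dm-log-clustered */
	return load_log_module("clustered-disk");
}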