
Commit 7272c30b authored by Linus Torvalds
Pull device-mapper updates from Alasdair G Kergon:
- Flip the thin target into new read-only or failed modes if errors
  are detected;
- Handle chunk sizes that are not powers of two in the snapshot and
  thin targets;
- Provide a way for userspace to avoid replacing an already-loaded
  multipath hardware handler while booting;
- Reduce dm_thin_endio_hook slab size to avoid allocation failures;
- Numerous small changes and cleanups to the code.

* tag 'dm-3.6-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm: (63 commits)
  dm thin: commit before gathering status
  dm thin: add read only and fail io modes
  dm thin metadata: introduce dm_pool_abort_metadata
  dm thin metadata: introduce dm_pool_metadata_set_read_only
  dm persistent data: introduce dm_bm_set_read_only
  dm thin: reduce number of metadata commits
  dm thin metadata: add dm_thin_changed_this_transaction
  dm thin metadata: add format option to dm_pool_metadata_open
  dm thin metadata: tidy up open and format error paths
  dm thin metadata: only check incompat features on open
  dm thin metadata: remove duplicate pmd initialisation
  dm thin metadata: remove create parameter from __create_persistent_data_objects
  dm thin metadata: move __superblock_all_zeroes to __open_or_format_metadata
  dm thin metadata: remove nr_blocks arg from __create_persistent_data_objects
  dm thin metadata: split __open or format metadata
  dm thin metadata: use struct dm_pool_metadata members in __open_or_format_metadata
  dm thin metadata: zero unused superblock uuid
  dm thin metadata: lift __begin_transaction out of __write_initial_superblock
  dm thin metadata: move dm_commit_pool_metadata into __write_initial_superblock
  dm thin metadata: factor out __write_initial_superblock
  ...
parents 6f51f515 1f4e0ff0
Documentation/device-mapper/striped.txt +3 −4
@@ -9,15 +9,14 @@ devices in parallel.

Parameters: <num devs> <chunk size> [<dev path> <offset>]+
    <num devs>: Number of underlying devices.
    <chunk size>: Size of each chunk of data. Must be a power-of-2 and at
                  least as large as the system's PAGE_SIZE.
    <chunk size>: Size of each chunk of data. Must be at least as
                  large as the system's PAGE_SIZE.
    <dev path>: Full pathname to the underlying block-device, or a
                "major:minor" device-number.
    <offset>: Starting sector within the device.

One or more underlying devices can be specified. The striped device size must
be a multiple of the chunk size and a multiple of the number of underlying
devices.
be a multiple of the chunk size multiplied by the number of underlying devices.


Example scripts
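
For illustration only (this example is not part of the patch): with the power-of-two restriction dropped, a chunk size merely has to be at least PAGE_SIZE, so a table like the sketch below would be accepted. The device paths and sizes are invented; the 7680-sector length is a multiple of the 384-sector chunk size times the two devices.

    # Sketch with made-up devices: two-way stripe using a 384-sector (192KB)
    # chunk, which is not a power of two but exceeds a 4KB PAGE_SIZE.
    dmsetup create mystripe --table "0 7680 striped 2 384 /dev/sdb1 0 /dev/sdc1 0"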
Documentation/device-mapper/thin-provisioning.txt +23 −1
@@ -231,6 +231,9 @@ i) Constructor
      no_discard_passdown: Don't pass discards down to the underlying
			   data device, but just remove the mapping.

      read_only: Don't allow any changes to be made to the pool
		 metadata.

    Data block size must be between 64KB (128 sectors) and 1GB
    (2097152 sectors) inclusive.

@@ -239,7 +242,7 @@ ii) Status

    <transaction id> <used metadata blocks>/<total metadata blocks>
    <used data blocks>/<total data blocks> <held metadata root>

    [no_]discard_passdown ro|rw

    transaction id:
	A 64-bit number used by userspace to help synchronise with metadata
@@ -257,6 +260,21 @@ ii) Status
	held root.  This feature is not yet implemented so '-' is
	always returned.

    discard_passdown|no_discard_passdown
	Whether or not discards are actually being passed down to the
	underlying device.  When this is enabled when loading the table,
	it can get disabled if the underlying device doesn't support it.

    ro|rw
	If the pool encounters certain types of device failures it will
	drop into a read-only metadata mode in which no changes to
	the pool metadata (like allocating new blocks) are permitted.

	In serious cases where even a read-only mode is deemed unsafe
	no further I/O will be permitted and the status will just
	contain the string 'Fail'.  The userspace recovery tools
	should then be used.

iii) Messages

    create_thin <dev id>
@@ -329,3 +347,7 @@ regain some space then send the 'trim' message to the pool.
ii) Status

     <nr mapped sectors> <highest mapped sector>

	If the pool has encountered device errors and failed, the status
	will just contain the string 'Fail'.  The userspace recovery
	tools should then be used.
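
For illustration only (not part of the patch): the new read_only pool feature and the extended status described above might be exercised roughly as follows; the device names, sizes and block counts are invented.

    # Sketch: load a thin pool whose metadata may not be modified.
    # 128 sectors (64KB) is the data block size, 32768 blocks the low water
    # mark, and "1 read_only" passes the new feature argument.
    dmsetup create pool --table \
      "0 20971520 thin-pool /dev/mapper/meta /dev/mapper/data 128 32768 1 read_only"

    # "dmsetup status pool" then ends with 'ro' rather than 'rw'; if the pool
    # has failed outright, the target-specific status is just the string 'Fail'.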
drivers/md/Kconfig +0 −9
@@ -260,15 +260,6 @@ config DM_DEBUG_BLOCK_STACK_TRACING

	  If unsure, say N.

config DM_DEBUG_SPACE_MAPS
	boolean "Extra validation for thin provisioning space maps"
	depends on DM_THIN_PROVISIONING
	---help---
	  Enable this for messages that may help debug problems with the
	  space maps used by thin provisioning.

          If unsure, say N.

config DM_MIRROR
       tristate "Mirror target"
       depends on BLK_DEV_DM
drivers/md/dm-crypt.c +105 −114
@@ -42,21 +42,21 @@ struct convert_context {
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
	sector_t cc_sector;
	atomic_t cc_pending;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct crypt_config *cc;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	atomic_t io_pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
@@ -109,9 +109,6 @@ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
 */
struct crypt_cpu {
	struct ablkcipher_request *req;
	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher *tfms[0];
};

/*
@@ -151,6 +148,10 @@ struct crypt_config {
	 * per_cpu_ptr() only.
	 */
	struct crypt_cpu __percpu *cpu;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher **tfms;
	unsigned tfms_count;

	/*
@@ -193,7 +194,7 @@ static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return __this_cpu_ptr(cc->cpu)->tfms[0];
	return cc->tfms[0];
}

/*
@@ -258,7 +259,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err, cpu;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
@@ -268,14 +269,12 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
	if (err)
		return err;

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private,
	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
			    crypto_hash_digestsize(essiv->hash_tfm));
	if (err)
		return err;
	}

	return 0;
}
@@ -286,16 +285,14 @@ static int crypt_iv_essiv_wipe(struct crypt_config *cc)
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int cpu, r, err = 0;
	int r, err = 0;

	memset(essiv->salt, 0, salt_size);

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
	if (r)
		err = r;
	}

	return err;
}
@@ -335,8 +332,6 @@ static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	int cpu;
	struct crypt_cpu *cpu_cc;
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

@@ -346,15 +341,12 @@ static void crypt_iv_essiv_dtr(struct crypt_config *cc)
	kzfree(essiv->salt);
	essiv->salt = NULL;

	for_each_possible_cpu(cpu) {
		cpu_cc = per_cpu_ptr(cc->cpu, cpu);
		essiv_tfm = cpu_cc->iv_private;
	essiv_tfm = cc->iv_private;

	if (essiv_tfm)
		crypto_free_cipher(essiv_tfm);

		cpu_cc->iv_private = NULL;
	}
	cc->iv_private = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
@@ -363,7 +355,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err, cpu;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
@@ -388,15 +380,13 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	for_each_possible_cpu(cpu) {
	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
				crypto_hash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
		per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
	}
	cc->iv_private = essiv_tfm;

	return 0;

@@ -410,7 +400,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
@@ -664,7 +654,7 @@ static void crypt_convert_init(struct crypt_config *cc,
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

@@ -695,12 +685,12 @@ static int crypt_convert_block(struct crypt_config *cc,
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->sector;
	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
@@ -749,12 +739,12 @@ static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	unsigned key_index = ctx->sector & (cc->tfms_count - 1);
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!this_cc->req)
		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
	ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
	ablkcipher_request_set_callback(this_cc->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
@@ -769,14 +759,14 @@ static int crypt_convert(struct crypt_config *cc,
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	int r;

	atomic_set(&ctx->pending, 1);
	atomic_set(&ctx->cc_pending, 1);

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->pending);
		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, this_cc->req);

@@ -788,19 +778,19 @@ static int crypt_convert(struct crypt_config *cc,
			/* fall through*/
		case -EINPROGRESS:
			this_cc->req = NULL;
			ctx->sector++;
			ctx->cc_sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->pending);
			ctx->sector++;
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->pending);
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}
@@ -811,7 +801,7 @@ static int crypt_convert(struct crypt_config *cc,
static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;
	struct crypt_config *cc = io->cc;

	bio_free(bio, cc->bs);
}
@@ -825,7 +815,7 @@ static void dm_crypt_bio_destructor(struct bio *bio)
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->target->private;
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
@@ -884,26 +874,25 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->pending, 0);
	atomic_set(&io->io_pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
	atomic_inc(&io->io_pending);
}

/*
@@ -913,12 +902,12 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->pending))
	if (!atomic_dec_and_test(&io->io_pending))
		return;

	mempool_free(io, cc->io_pool);
@@ -952,7 +941,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
@@ -979,7 +968,7 @@ static void crypt_endio(struct bio *clone, int error)

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
@@ -990,7 +979,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->target->private;
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

@@ -1038,7 +1027,7 @@ static void kcryptd_io(struct work_struct *work)

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
@@ -1047,7 +1036,7 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;
	struct crypt_config *cc = io->cc;

	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
@@ -1069,7 +1058,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
@@ -1107,7 +1096,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
		if (r < 0)
			io->error = -EIO;

		crypt_finished = atomic_dec_and_test(&io->ctx.pending);
		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
@@ -1135,7 +1124,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->target, io->base_bio,
			new_io = crypt_io_alloc(io->cc, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
@@ -1169,7 +1158,7 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct crypt_config *cc = io->cc;
	int r = 0;

	crypt_inc_pending(io);
@@ -1181,7 +1170,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
	if (r < 0)
		io->error = -EIO;

	if (atomic_dec_and_test(&io->ctx.pending))
	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
@@ -1193,7 +1182,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;
	struct crypt_config *cc = io->cc;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
@@ -1208,7 +1197,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
@@ -1229,7 +1218,7 @@ static void kcryptd_crypt(struct work_struct *work)

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
@@ -1241,7 +1230,6 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';
@@ -1250,9 +1238,7 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
		if (kstrtou8(buffer, 16, &key[i]))
			return -EINVAL;
	}

@@ -1276,29 +1262,38 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
	}
}

static void crypt_free_tfms(struct crypt_config *cc, int cpu)
static void crypt_free_tfms(struct crypt_config *cc)
{
	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
	unsigned i;

	if (!cc->tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
			crypto_free_ablkcipher(cpu_cc->tfms[i]);
			cpu_cc->tfms[i] = NULL;
		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
			crypto_free_ablkcipher(cc->tfms[i]);
			cc->tfms[i] = NULL;
		}

	kfree(cc->tfms);
	cc->tfms = NULL;
}

static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
	unsigned i;
	int err;

	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
			   GFP_KERNEL);
	if (!cc->tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cpu_cc->tfms[i])) {
			err = PTR_ERR(cpu_cc->tfms[i]);
			crypt_free_tfms(cc, cpu);
		cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->tfms[i])) {
			err = PTR_ERR(cc->tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}
@@ -1309,16 +1304,15 @@ static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
	int cpu, err = 0, i, r;
	int err = 0, i, r;

	for_each_possible_cpu(cpu) {
	for (i = 0; i < cc->tfms_count; i++) {
			r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
						     cc->key + (i * subkey_size), subkey_size);
		r = crypto_ablkcipher_setkey(cc->tfms[i],
					     cc->key + (i * subkey_size),
					     subkey_size);
		if (r)
			err = r;
	}
	}

	return err;
}
@@ -1379,9 +1373,10 @@ static void crypt_dtr(struct dm_target *ti)
			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
			if (cpu_cc->req)
				mempool_free(cpu_cc->req, cc->req_pool);
			crypt_free_tfms(cc, cpu);
		}

	crypt_free_tfms(cc);

	if (cc->bs)
		bioset_free(cc->bs);

@@ -1414,7 +1409,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int cpu, ret = -EINVAL;
	int ret = -EINVAL;
	char dummy;

	/* Convert to crypto api definition? */
@@ -1455,8 +1450,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
				 cc->tfms_count * sizeof(*(cc->cpu->tfms)),
	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
				 __alignof__(struct crypt_cpu));
	if (!cc->cpu) {
		ti->error = "Cannot allocate per cpu state";
@@ -1489,13 +1483,11 @@ static int crypt_ctr_cipher(struct dm_target *ti,
	}

	/* Allocate cipher */
	for_each_possible_cpu(cpu) {
		ret = crypt_alloc_tfms(cc, cpu, cipher_api);
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
@@ -1702,7 +1694,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	}

	ti->num_flush_requests = 1;
	ti->discard_zeroes_data_unsupported = 1;
	ti->discard_zeroes_data_unsupported = true;

	return 0;

@@ -1715,7 +1707,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
@@ -1723,14 +1715,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
	 * - for REQ_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		cc = ti->private;
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
@@ -1742,7 +1733,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
			unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int sz = 0;
drivers/md/dm-delay.c +1 −1
@@ -295,7 +295,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio,
}

static int delay_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned maxlen)
			unsigned status_flags, char *result, unsigned maxlen)
{
	struct delay_c *dc = ti->private;
	int sz = 0;