Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c083ebe7 authored by Milan Broz, committed by Eric Biggers
Browse files

BACKPORT: dm crypt: optionally support larger encryption sector size



Add optional "sector_size" parameter that specifies encryption sector
size (atomic unit of block device encryption).

Parameter can be in range 512 - 4096 bytes and must be power of two.
For compatibility reasons, the maximal IO must fit into the page limit,
so the limit is set to the minimal page size possible (4096 bytes).

NOTE: this device cannot yet be handled by cryptsetup if this parameter
is set.

IV for the sector is calculated from the 512 bytes sector offset unless
the iv_large_sectors option is used.

Test script using dmsetup:

  DEV="/dev/sdb"
  DEV_SIZE=$(blockdev --getsz $DEV)
  KEY="9c1185a5c5e9fc54612808977ee8f548b2258d31ddadef707ba62c166051b9e3cd0294c27515f2bccee924e8823ca6e124b8fc3167ed478bca702babe4e130ac"
  BLOCK_SIZE=4096

  # dmsetup create test_crypt --table "0 $DEV_SIZE crypt aes-xts-plain64 $KEY 0 $DEV 0 1 sector_size:$BLOCK_SIZE"
  # dmsetup table --showkeys test_crypt

Signed-off-by: Milan Broz <gmazyland@gmail.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>

(cherry picked from commit 8f0009a225171cc1b76a6b443de5137b26e1374b)

Conflicts:
	Documentation/device-mapper/dm-crypt.txt
	drivers/md/dm-crypt.c

Bug: 112008522

Test: In an x86_64 VM, wrote 0's to a dm-crypt target using Adiantum and
      a fixed key and verified the resulting ciphertext on the device
      matches that produced by the upstream kernel in the same case.
      Did this for both 512 and 4096-byte sectors.

        key=EC0531985A96E87F756035F804D051DFE50036987D9AD706AAF9BDBDD1F5B786
        dmsetup create crypt --table \
            "0 $((1<<17)) crypt xchacha12,aes-adiantum-plain64 $key 0 /dev/vdc 0"
        dd if=/dev/zero bs=4096 count=$((1<<14)) of=/dev/mapper/crypt
        sync && echo 3 > /proc/sys/vm/drop_caches
        head -c $((1<<26)) /dev/vdc | md5sum # 44e24946ce308f3b4495bc2ff9cade8b
        head -c $((1<<26)) /dev/vdc | xz -9 | wc -c
        dmsetup remove crypt

        key=EC0531985A96E87F756035F804D051DFE50036987D9AD706AAF9BDBDD1F5B786
        dmsetup create crypt --table \
            "0 $((1<<17)) crypt xchacha12,aes-adiantum-plain64 $key 0 /dev/vdc 0 2 sector_size:4096 iv_large_sectors"
        dd if=/dev/zero bs=4096 count=$((1<<14)) of=/dev/mapper/crypt
        sync && echo 3 > /proc/sys/vm/drop_caches
        head -c $((1<<26)) /dev/vdc | md5sum # 8aa1cb4f5ebef575b15b5f8d64763118
        head -c $((1<<26)) /dev/vdc | xz -9 | wc -c
        dmsetup remove crypt

Change-Id: Ie247fd41b11adcf8520de0b1554568a1b7155860
Signed-off-by: Eric Biggers <ebiggers@google.com>
parent e33fa31e
Loading
Loading
Loading
Loading
+14 −0
Original line number Diff line number Diff line
@@ -76,6 +76,20 @@ submit_from_crypt_cpus
    thread because it benefits CFQ to have writes submitted using the
    same context.

sector_size:<bytes>
    Use <bytes> as the encryption unit instead of 512 bytes sectors.
    This option can be in range 512 - 4096 bytes and must be power of two.
    Virtual device will announce this size as a minimal IO and logical sector.

iv_large_sectors
   IV generators will use sector number counted in <sector_size> units
   instead of default 512 bytes sectors.

   For example, if <sector_size> is 4096 bytes, plain64 IV for the second
   sector will be 8 (without flag) and 1 if iv_large_sectors is present.
   The <iv_offset> must be multiple of <sector_size> (in 512 bytes units)
   if this flag is specified.

Example scripts
===============
LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
+69 −11
Original line number Diff line number Diff line
@@ -115,6 +115,10 @@ struct iv_tcw_private {
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };

enum cipher_flags {
	CRYPT_IV_LARGE_SECTORS,		/* Calculate IV from sector_size, not 512B sectors */
};

/*
 * The fields in here must be read only after initialization.
 */
@@ -150,11 +154,13 @@ struct crypt_config {
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;
	unsigned int sector_size;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_skcipher **tfms;
	unsigned tfms_count;
	unsigned long cipher_flags;

	/*
	 * Layout of each crypto request:
@@ -484,6 +490,11 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for LMK";
		return -EINVAL;
	}

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
@@ -633,6 +644,11 @@ static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for TCW";
		return -EINVAL;
	}

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
@@ -846,21 +862,27 @@ static int crypt_convert_block(struct crypt_config *cc,
	u8 *iv;
	int r;

	/* Reject unexpected unaligned bio. */
	if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		sector_div(dmreq->iv_sector, cc->sector_size >> SECTOR_SHIFT);
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
	sg_set_page(&dmreq->sg_in, bv_in.bv_page, cc->sector_size,
		    bv_in.bv_offset);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
	sg_set_page(&dmreq->sg_out, bv_out.bv_page, cc->sector_size,
		    bv_out.bv_offset);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
@@ -869,7 +891,7 @@ static int crypt_convert_block(struct crypt_config *cc,
	}

	skcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				   1 << SECTOR_SHIFT, iv);
				   cc->sector_size, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_skcipher_encrypt(req);
@@ -919,6 +941,7 @@ static void crypt_free_req(struct crypt_config *cc,
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	unsigned int sector_step = cc->sector_size / (1 << SECTOR_SHIFT);
	int r;

	atomic_set(&ctx->cc_pending, 1);
@@ -926,7 +949,6 @@ static int crypt_convert(struct crypt_config *cc,
	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, ctx->req);
@@ -946,14 +968,14 @@ static int crypt_convert(struct crypt_config *cc,
		 */
		case -EINPROGRESS:
			ctx->req = NULL;
			ctx->cc_sector++;
			ctx->cc_sector += sector_step;
			continue;
		/*
		 * The request was already processed (synchronously).
		 */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			ctx->cc_sector += sector_step;
			cond_resched();
			continue;

@@ -1743,7 +1765,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	char dummy;

	static struct dm_arg _args[] = {
		{0, 3, "Invalid number of feature args"},
		{0, 5, "Invalid number of feature args"},
	};

	if (argc < 5) {
@@ -1759,6 +1781,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
		return -ENOMEM;
	}
	cc->key_size = key_size;
	cc->sector_size = (1 << SECTOR_SHIFT);

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
@@ -1810,7 +1833,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	mutex_init(&cc->bio_alloc_lock);

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
	if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
	    (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
@@ -1858,6 +1882,16 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
			else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
				set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);

			else if (sscanf(opt_string, "sector_size:%u%c",
					&cc->sector_size, &dummy) == 1) {
				if (cc->sector_size < (1 << SECTOR_SHIFT) ||
				    cc->sector_size > 4096 ||
				    (1 << ilog2(cc->sector_size) != cc->sector_size)) {
					ti->error = "Invalid feature value for sector_size";
					goto bad;
				}
			} else if (!strcasecmp(opt_string, "iv_large_sectors"))
				set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
			else {
				ti->error = "Invalid feature arguments";
				goto bad;
@@ -1938,6 +1972,16 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
	    bio_data_dir(bio) == WRITE)
		dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));

	/*
	 * Ensure that bio is a multiple of internal sector encryption size
	 * and is aligned to this size as defined in IO hints.
	 */
	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
		return -EIO;

	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
		return -EIO;

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
	io->ctx.req = (struct skcipher_request *)(io + 1);
@@ -1978,6 +2022,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		num_feature_args += (cc->sector_size != (1 << SECTOR_SHIFT)) ? 1 : 0;
		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
@@ -1986,6 +2032,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
				DMEMIT(" same_cpu_crypt");
			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
				DMEMIT(" submit_from_crypt_cpus");
			if (cc->sector_size != (1 << SECTOR_SHIFT))
				DMEMIT(" sector_size:%d", cc->sector_size);
			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
				DMEMIT(" iv_large_sectors");
		}

		break;
@@ -2068,6 +2118,8 @@ static int crypt_iterate_devices(struct dm_target *ti,

static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct crypt_config *cc = ti->private;

	/*
	 * Unfortunate constraint that is required to avoid the potential
	 * for exceeding underlying device's max_segments limits -- due to
@@ -2075,11 +2127,17 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
	 * bio that are not as physically contiguous as the original bio.
	 */
	limits->max_segment_size = PAGE_SIZE;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		limits->logical_block_size = cc->sector_size;
		limits->physical_block_size = cc->sector_size;
		blk_limits_io_min(limits, cc->sector_size);
	}
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 14, 1},
	.version = {1, 17, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,