Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b01c73ea authored by Satya Tangirala
Browse files

BACKPORT: FROMLIST: Update Inline Encryption from v5 to v6 of patch series

Changes v5 => v6:
 - Blk-crypto's kernel crypto API fallback is no longer restricted to
   8-byte DUNs. It's also now separately configurable from blk-crypto, and
   can be disabled entirely, while still allowing the kernel to use inline
   encryption hardware. Further, struct bio_crypt_ctx takes up less space,
   and no longer contains the information needed by the crypto API
   fallback - the fallback allocates the required memory when necessary.
 - Blk-crypto now supports all file content encryption modes supported by
   fscrypt.
 - Fixed bio merging logic in blk-merge.c
 - Fscrypt now supports inline encryption with the direct key policy, since
   blk-crypto now has support for larger DUNs.
 - Keyslot manager now uses a hashtable to look up which keyslot contains
   any particular key (thanks Eric!)
 - Fscrypt support for inline encryption now handles filesystems with
   multiple underlying block devices (thanks Eric!)
 - Numerous cleanups

Bug: 137270441
Test: refer to I26376479ee38259b8c35732cb3a1d7e15f9b05a3
Change-Id: I13e2e327e0b4784b394cb1e7cf32a04856d95f01
Link: https://lore.kernel.org/linux-block/20191218145136.172774-1-satyat@google.com/


Signed-off-by: Satya Tangirala <satyat@google.com>
parent 5da11144
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -97,7 +97,7 @@ Blk-crypto ensures that:

- The bio's encryption context is programmed into a keyslot in the KSM of the
  request queue that the bio is being submitted to (or the crypto API fallback
  KSM if the request queue doesn't have a KSM), and that the ``processing_ksm``
  KSM if the request queue doesn't have a KSM), and that the ``bc_ksm``
  in the ``bi_crypt_context`` is set to this KSM

- That the bio has its own individual reference to the keyslot in this KSM.
@@ -107,7 +107,7 @@ Blk-crypto ensures that:
  ensuring that the bio has a valid reference to the keyslot when, e.g., the
  crypto API fallback KSM in blk-crypto performs crypto on the device's behalf.
  The individual references are ensured by increasing the refcount for the
  keyslot in the ``processing_ksm`` when a bio with a programmed encryption
  keyslot in the ``bc_ksm`` when a bio with a programmed encryption
  context is cloned.


@@ -120,7 +120,7 @@ been programmed into any keyslot in any KSM (e.g. a bio from the FS).
  request queue the bio is being submitted to (and if this KSM does not exist,
  then it will program it into blk-crypto's internal KSM for crypto API
  fallback). The KSM that this encryption context was programmed into is stored
  as the ``processing_ksm`` in the bio's ``bi_crypt_context``.
  as the ``bc_ksm`` in the bio's ``bi_crypt_context``.

**Case 2:** blk-crypto is given a bio whose encryption context has already been
programmed into a keyslot in the *crypto API fallback* KSM.
@@ -138,7 +138,7 @@ KSM).
This way, when a device driver is processing a bio, it can be sure that
the bio's encryption context has been programmed into some KSM (either the
device driver's request queue's KSM, or blk-crypto's crypto API fallback KSM).
It then simply needs to check if the bio's processing_ksm is the device's
It then simply needs to check if the bio's ``bc_ksm`` is the device's
request queue's KSM. If so, then it should proceed with IE. If not, it should
simply do nothing with respect to crypto, because some other KSM (perhaps the
blk-crypto crypto API fallback KSM) is handling the en/decryption.
+17761 −17728

File changed.

Preview size limit exceeded, changes collapsed.

+11 −4
Original line number Diff line number Diff line
@@ -202,13 +202,20 @@ config BLK_SED_OPAL

config BLK_INLINE_ENCRYPTION
	bool "Enable inline encryption support in block layer"
	help
	  Build the blk-crypto subsystem. Enabling this lets the
	  block layer handle encryption, so users can take
	  advantage of inline encryption hardware if present.

config BLK_INLINE_ENCRYPTION_FALLBACK
	bool "Enable crypto API fallback for blk-crypto"
	depends on BLK_INLINE_ENCRYPTION
	select CRYPTO
	select CRYPTO_BLKCIPHER
	help
	  Build the blk-crypto subsystem.
	  Enabling this lets the block layer handle encryption,
	  so users can take advantage of inline encryption
	  hardware if present.
	  Enabling this lets the block layer handle inline encryption
	  by falling back to the kernel crypto API when inline
	  encryption hardware is not present.

menu "Partition Types"

+1 −0
Original line number Diff line number Diff line
@@ -39,3 +39,4 @@ obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
obj-$(CONFIG_BLK_SED_OPAL)	+= sed-opal.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION)	+= keyslot-manager.o bio-crypt-ctx.o \
					   blk-crypto.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK)	+= blk-crypto-fallback.o
 No newline at end of file
+55 −60
Original line number Diff line number Diff line
@@ -5,26 +5,43 @@

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		"Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

int bio_crypt_ctx_init(void)
int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		return -ENOMEM;

	bio_crypt_ctx_pool = mempool_create_slab_pool(
					num_prealloc_crypt_ctxs,
	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);

	if (!bio_crypt_ctx_pool)
		return -ENOMEM;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
}

@@ -32,51 +49,43 @@ struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask)
{
	return mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
}
EXPORT_SYMBOL(bio_crypt_alloc_ctx);

void bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}
EXPORT_SYMBOL(bio_crypt_free_ctx);

int bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
void bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	const struct bio_crypt_ctx *src_bc = src->bi_crypt_context;

	/*
	 * If a bio is swhandled, then it will be decrypted when bio_endio
	 * is called. As we only want the data to be decrypted once, copies
	 * of the bio must not have a crypt context.
	 * If a bio is fallback_crypted, then it will be decrypted when
	 * bio_endio is called. As we only want the data to be decrypted once,
	 * copies of the bio must not have a crypt context.
	 */
	if (!bio_has_crypt_ctx(src) || bio_crypt_swhandled(src))
		return 0;
	if (!src_bc || bio_crypt_fallback_crypted(src_bc))
		return;

	dst->bi_crypt_context = bio_crypt_alloc_ctx(gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;

	*dst->bi_crypt_context = *src->bi_crypt_context;
	*dst->bi_crypt_context = *src_bc;

	if (bio_crypt_has_keyslot(src))
		keyslot_manager_get_slot(src->bi_crypt_context->processing_ksm,
					 src->bi_crypt_context->keyslot);

	return 0;
	if (src_bc->bc_keyslot >= 0)
		keyslot_manager_get_slot(src_bc->bc_ksm, src_bc->bc_keyslot);
}
EXPORT_SYMBOL(bio_crypt_clone);
EXPORT_SYMBOL_GPL(bio_crypt_clone);

bool bio_crypt_should_process(struct bio *bio, struct request_queue *q)
bool bio_crypt_should_process(struct request *rq)
{
	if (!bio_has_crypt_ctx(bio))
		return false;
	struct bio *bio = rq->bio;

	if (q->ksm != bio->bi_crypt_context->processing_ksm)
	if (!bio || !bio->bi_crypt_context)
		return false;

	WARN_ON(!bio_crypt_has_keyslot(bio));
	return true;
	return rq->q->ksm == bio->bi_crypt_context->bc_ksm;
}
EXPORT_SYMBOL(bio_crypt_should_process);
EXPORT_SYMBOL_GPL(bio_crypt_should_process);

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
@@ -87,22 +96,18 @@ bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2)
	struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
	struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;

	if (bio_has_crypt_ctx(b_1) != bio_has_crypt_ctx(b_2))
	if (bc1 != bc2)
		return false;

	if (!bio_has_crypt_ctx(b_1))
		return true;

	return bc1->keyslot == bc2->keyslot &&
	       bc1->data_unit_size_bits == bc2->data_unit_size_bits;
	return !bc1 || bc1->bc_key == bc2->bc_key;
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order b_1 followed by b_2.
 */
bool bio_crypt_ctx_back_mergeable(struct bio *b_1,
				  unsigned int b1_sectors,
bool bio_crypt_ctx_mergeable(struct bio *b_1, unsigned int b1_bytes,
			     struct bio *b_2)
{
	struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
@@ -111,35 +116,25 @@ bool bio_crypt_ctx_back_mergeable(struct bio *b_1,
	if (!bio_crypt_ctx_compatible(b_1, b_2))
		return false;

	return !bio_has_crypt_ctx(b_1) ||
		(bc1->data_unit_num +
		(b1_sectors >> (bc1->data_unit_size_bits - 9)) ==
		bc2->data_unit_num);
	return !bc1 || bio_crypt_dun_is_contiguous(bc1, b1_bytes, bc2->bc_dun);
}

void bio_crypt_ctx_release_keyslot(struct bio *bio)
void bio_crypt_ctx_release_keyslot(struct bio_crypt_ctx *bc)
{
	struct bio_crypt_ctx *crypt_ctx = bio->bi_crypt_context;

	keyslot_manager_put_slot(crypt_ctx->processing_ksm, crypt_ctx->keyslot);
	bio->bi_crypt_context->processing_ksm = NULL;
	bio->bi_crypt_context->keyslot = -1;
	keyslot_manager_put_slot(bc->bc_ksm, bc->bc_keyslot);
	bc->bc_ksm = NULL;
	bc->bc_keyslot = -1;
}

int bio_crypt_ctx_acquire_keyslot(struct bio *bio, struct keyslot_manager *ksm)
int bio_crypt_ctx_acquire_keyslot(struct bio_crypt_ctx *bc,
				  struct keyslot_manager *ksm)
{
	int slot;
	enum blk_crypto_mode_num crypto_mode = bio_crypto_mode(bio);

	if (!ksm)
		return -ENOMEM;
	int slot = keyslot_manager_get_slot_for_key(ksm, bc->bc_key);

	slot = keyslot_manager_get_slot_for_key(ksm,
			bio_crypt_raw_key(bio), crypto_mode,
			1 << bio->bi_crypt_context->data_unit_size_bits);
	if (slot < 0)
		return slot;

	bio_crypt_set_keyslot(bio, slot, ksm);
	bc->bc_keyslot = slot;
	bc->bc_ksm = ksm;
	return 0;
}
Loading