
Commit 20efc30a authored by Satya Tangirala, committed by Alistair Delva

BACKPORT: FROMLIST: block: Add encryption context to struct bio



We must have some way of letting a storage device driver know what
encryption context it should use for en/decrypting a request. However,
it's the filesystem/fscrypt that knows about and manages encryption
contexts. As such, when the filesystem layer submits a bio to the block
layer, and this bio eventually reaches a device driver with support for
inline encryption, the device driver will need to have been told the
encryption context for that bio.

We want to communicate the encryption context from the filesystem layer
to the storage device along with the bio, when the bio is submitted to the
block layer. To do this, we add a struct bio_crypt_ctx to struct bio, which
can represent an encryption context (note that we can't use the bi_private
field in struct bio to do this because that field does not function to pass
information across layers in the storage stack). We also introduce various
functions to manipulate the bio_crypt_ctx and make the bio/request merging
logic aware of the bio_crypt_ctx.
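
For reference, the per-bio context itself is defined in a header added elsewhere in this series, which this page does not render. A minimal sketch of it, inferred purely from the fields that block/bio-crypt-ctx.c below reads and writes (the raw_key and crypto_mode names are guessed from the accessors bio_crypt_raw_key() and bio_crypto_mode(); the real layout may differ):

/*
 * Sketch only -- inferred from field usage in bio-crypt-ctx.c below,
 * not copied from the patch's header.
 */
struct bio_crypt_ctx {
	int keyslot;			/* slot in a keyslot manager; -1 when not programmed */
	const u8 *raw_key;		/* key material, read via bio_crypt_raw_key() */
	enum blk_crypto_mode_num crypto_mode;	/* read via bio_crypto_mode() */
	u64 data_unit_num;		/* DUN (IV) of the bio's first data unit */
	unsigned int data_unit_size_bits;	/* log2 of the data unit size in bytes */
	struct keyslot_manager *processing_ksm;	/* KSM holding the programmed key */
};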

Bug: 137270441
Test: tested as series; see I26aac0ac7845a9064f28bb1421eb2522828a6dec
Change-Id: I16d99bb97f8cd7971cc11281a0d7120c5f87d83c
Signed-off-by: Satya Tangirala <satyat@google.com>
Link: https://patchwork.kernel.org/patch/11214719/
parent b0a4fb22
block/Makefile
+1 −1
@@ -37,4 +37,4 @@ obj-$(CONFIG_BLK_WBT) += blk-wbt.o
 obj-$(CONFIG_BLK_DEBUG_FS)	+= blk-mq-debugfs.o
 obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
 obj-$(CONFIG_BLK_SED_OPAL)	+= sed-opal.o
-obj-$(CONFIG_BLK_INLINE_ENCRYPTION)	+= keyslot-manager.o
+obj-$(CONFIG_BLK_INLINE_ENCRYPTION)	+= keyslot-manager.o bio-crypt-ctx.o

block/bio-crypt-ctx.c (new file, mode 100644)
+137 −0
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/keyslot-manager.h>

static int num_prealloc_crypt_ctxs = 128;
static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

int bio_crypt_ctx_init(void)
{
	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		return -ENOMEM;

	bio_crypt_ctx_pool = mempool_create_slab_pool(
					num_prealloc_crypt_ctxs,
					bio_crypt_ctx_cache);

	if (!bio_crypt_ctx_pool)
		return -ENOMEM;

	return 0;
}

struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask)
{
	return mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
}
EXPORT_SYMBOL(bio_crypt_alloc_ctx);

void bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}
EXPORT_SYMBOL(bio_crypt_free_ctx);

int bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	if (!bio_has_crypt_ctx(src))
		return 0;

	dst->bi_crypt_context = bio_crypt_alloc_ctx(gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;

	*dst->bi_crypt_context = *src->bi_crypt_context;

	if (bio_crypt_has_keyslot(src))
		keyslot_manager_get_slot(src->bi_crypt_context->processing_ksm,
					 src->bi_crypt_context->keyslot);

	return 0;
}
EXPORT_SYMBOL(bio_crypt_clone);

bool bio_crypt_should_process(struct bio *bio, struct request_queue *q)
{
	if (!bio_has_crypt_ctx(bio))
		return false;

	WARN_ON(!bio_crypt_has_keyslot(bio));
	return q->ksm == bio->bi_crypt_context->processing_ksm;
}
EXPORT_SYMBOL(bio_crypt_should_process);

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2)
{
	struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
	struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;

	if (bio_has_crypt_ctx(b_1) != bio_has_crypt_ctx(b_2))
		return false;

	if (!bio_has_crypt_ctx(b_1))
		return true;

	return bc1->keyslot == bc2->keyslot &&
	       bc1->data_unit_size_bits == bc2->data_unit_size_bits;
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 */
bool bio_crypt_ctx_back_mergeable(struct bio *b_1,
				  unsigned int b1_sectors,
				  struct bio *b_2)
{
	struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
	struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;

	if (!bio_crypt_ctx_compatible(b_1, b_2))
		return false;

	return !bio_has_crypt_ctx(b_1) ||
		(bc1->data_unit_num +
		(b1_sectors >> (bc1->data_unit_size_bits - 9)) ==
		bc2->data_unit_num);
}

void bio_crypt_ctx_release_keyslot(struct bio *bio)
{
	struct bio_crypt_ctx *crypt_ctx = bio->bi_crypt_context;

	keyslot_manager_put_slot(crypt_ctx->processing_ksm, crypt_ctx->keyslot);
	bio->bi_crypt_context->processing_ksm = NULL;
	bio->bi_crypt_context->keyslot = -1;
}

int bio_crypt_ctx_acquire_keyslot(struct bio *bio, struct keyslot_manager *ksm)
{
	int slot;
	enum blk_crypto_mode_num crypto_mode = bio_crypto_mode(bio);

	if (!ksm)
		return -ENOMEM;

	slot = keyslot_manager_get_slot_for_key(ksm,
			bio_crypt_raw_key(bio), crypto_mode,
			1 << bio->bi_crypt_context->data_unit_size_bits);
	if (slot < 0)
		return slot;

	bio_crypt_set_keyslot(bio, slot, ksm);
	return 0;
}
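
The data_unit_size_bits - 9 shift in bio_crypt_ctx_back_mergeable() converts a sector count into a data unit count, since a sector is 512 = 2^9 bytes. A quick worked check of the continuity condition, with hypothetical values invented for illustration (not part of the patch):

/* Hypothetical illustration of bio_crypt_ctx_back_mergeable()'s arithmetic. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	unsigned int data_unit_size_bits = 12;	/* 4096-byte data units */
	uint64_t b1_dun = 100;			/* first bio starts at DUN 100 */
	unsigned int b1_sectors = 32;		/* first bio spans 32 sectors */

	/* 4096 / 512 = 8 sectors per data unit, so 32 sectors = 4 data units */
	uint64_t expected_next_dun =
		b1_dun + (b1_sectors >> (data_unit_size_bits - 9));

	/* the second bio is back-mergeable only if it starts at DUN 104 */
	assert(expected_next_dun == 104);
	return 0;
}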
block/bio.c
+10 −8
@@ -253,6 +253,7 @@ static void bio_free(struct bio *bio)
 	struct bio_set *bs = bio->bi_pool;
 	void *p;
 
+	bio_crypt_free_ctx(bio);
 	bio_uninit(bio);
 
 	if (bs) {
@@ -632,15 +633,15 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
 
 	__bio_clone_fast(b, bio);
 
-	if (bio_integrity(bio)) {
-		int ret;
-
-		ret = bio_integrity_clone(b, bio, gfp_mask);
-
-		if (ret < 0) {
-			bio_put(b);
-			return NULL;
-		}
-	}
+	if (bio_crypt_clone(b, bio, gfp_mask) < 0) {
+		bio_put(b);
+		return NULL;
+	}
+
+	if (bio_integrity(bio) &&
+	    bio_integrity_clone(b, bio, gfp_mask) < 0) {
+		bio_put(b);
+		return NULL;
+	}
 
 	return b;
@@ -953,6 +954,7 @@ void bio_advance(struct bio *bio, unsigned bytes)
 	if (bio_integrity(bio))
 		bio_integrity_advance(bio, bytes);
 
+	bio_crypt_advance(bio, bytes);
 	bio_advance_iter(bio, &bio->bi_iter, bytes);
 }
 EXPORT_SYMBOL(bio_advance);
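
bio_crypt_advance() itself is added as a helper elsewhere in the series and is not rendered on this page. Judging from the DUN bookkeeping in block/bio-crypt-ctx.c above, it plausibly keeps data_unit_num in step with the advancing iterator; a sketch under that assumption:

/*
 * Plausible sketch only -- the real helper lives in a header not shown
 * here. Advances the DUN by the number of data units consumed.
 */
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	if (bc)
		bc->data_unit_num += bytes >> bc->data_unit_size_bits;
}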
block/blk-core.c
+3 −0
@@ -3989,5 +3989,8 @@ int __init blk_dev_init(void)
 	blk_debugfs_root = debugfs_create_dir("block", NULL);
 #endif
 
+	if (bio_crypt_ctx_init() < 0)
+		panic("Failed to allocate mem for bio crypt ctxs\n");
+
 	return 0;
 }
block/blk-merge.c
+26 −3
@@ -495,6 +495,9 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 	if (blk_integrity_merge_bio(q, req, bio) == false)
 		goto no_merge;
 
+	if (WARN_ON_ONCE(!bio_crypt_ctx_compatible(bio, req->bio)))
+		goto no_merge;
+
 	/*
 	 * This will form the start of a new hw segment.  Bump both
 	 * counters.
@@ -708,6 +711,11 @@ static struct request *attempt_merge(struct request_queue *q,
 	if (req->write_hint != next->write_hint)
 		return NULL;
 
+	if (!bio_crypt_ctx_back_mergeable(req->bio, blk_rq_sectors(req),
+					  next->bio)) {
+		return NULL;
+	}
+
 	/*
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
@@ -838,17 +846,32 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (rq->write_hint != bio->bi_write_hint)
 		return false;
 
+	/* Only merge if the crypt contexts are compatible */
+	if (!bio_crypt_ctx_compatible(bio, rq->bio))
+		return false;
+
 	return true;
 }
 
 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
 {
 	if (req_op(rq) == REQ_OP_DISCARD &&
-	    queue_max_discard_segments(rq->q) > 1)
+	    queue_max_discard_segments(rq->q) > 1) {
 		return ELEVATOR_DISCARD_MERGE;
-	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
+	} else if (blk_rq_pos(rq) + blk_rq_sectors(rq) ==
+		   bio->bi_iter.bi_sector) {
+		if (!bio_crypt_ctx_back_mergeable(rq->bio,
+						  blk_rq_sectors(rq), bio)) {
+			return ELEVATOR_NO_MERGE;
+		}
 		return ELEVATOR_BACK_MERGE;
-	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
+	} else if (blk_rq_pos(rq) - bio_sectors(bio) ==
+		   bio->bi_iter.bi_sector) {
+		if (!bio_crypt_ctx_back_mergeable(bio,
+						  bio_sectors(bio), rq->bio)) {
+			return ELEVATOR_NO_MERGE;
+		}
 		return ELEVATOR_FRONT_MERGE;
+	}
 	return ELEVATOR_NO_MERGE;
 }
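
Note the asymmetry in the two bio_crypt_ctx_back_mergeable() calls above: for a back merge the request's bio is the "first" span and the incoming bio must continue it, while for a front merge the roles are reversed. An illustrative restatement (the crypt_merge_ok wrapper is hypothetical, not part of the patch):

static bool crypt_merge_ok(struct bio *first, unsigned int first_sectors,
			   struct bio *second)
{
	/* "first" must end, in data-unit terms, exactly where "second" begins */
	return bio_crypt_ctx_back_mergeable(first, first_sectors, second);
}

/* back merge:  crypt_merge_ok(rq->bio, blk_rq_sectors(rq), bio)  */
/* front merge: crypt_merge_ok(bio, bio_sectors(bio), rq->bio)    */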