
Commit 20c82538 authored by Milan Broz, committed by Alasdair G Kergon

dm crypt: use io thread for reads only if mempool exhausted



If there is enough memory, the code can submit the bio
directly instead of queueing the operation to a separate thread.

Try to allocate the bio clone with GFP_NOWAIT, and only if that
fails fall back to the separate queue (the map function cannot
block here). A sketch of the general pattern follows the commit
metadata below.

Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent c0297721
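
The change is easier to follow as a standalone pattern: from a context that must not sleep, try the clone allocation with GFP_NOWAIT and submit inline; only when the bio set's mempool cannot satisfy that without waiting, defer to a worker where GFP_NOIO may block. Below is a minimal sketch against the block API of this era (bio_alloc_bioset, generic_make_request, the pre-3.x struct bio fields); the example_* names are hypothetical, not dm-crypt's, and the clone setup is abbreviated (dm-crypt's clone_init() also sets bi_bdev, bi_end_io and friends).

/*
 * Hypothetical sketch of the GFP_NOWAIT fast path with a workqueue
 * fallback; none of the example_* names exist in dm-crypt.
 */
#include <linux/bio.h>
#include <linux/string.h>
#include <linux/workqueue.h>

struct example_io {
	struct bio *base_bio;		/* original bio from the map function */
	struct bio_set *bs;		/* mempool-backed bio set */
	struct work_struct work;
};

/* Clone and submit the read; nonzero means the caller must defer. */
static int example_io_read(struct example_io *io, gfp_t gfp)
{
	struct bio *clone;

	/* With GFP_NOWAIT the mempool will not sleep, so this can fail. */
	clone = bio_alloc_bioset(gfp, bio_segments(io->base_bio), io->bs);
	if (!clone)
		return 1;

	/* Copy the bvec array: the block layer may modify the clone's,
	 * and the original is needed later to decrypt the data. */
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(io->base_bio);
	clone->bi_size = io->base_bio->bi_size;
	memcpy(clone->bi_io_vec, bio_iovec(io->base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
	return 0;
}

/* Worker context: sleeping is allowed, so GFP_NOIO waits on the
 * mempool instead of failing. */
static void example_io_worker(struct work_struct *work)
{
	struct example_io *io = container_of(work, struct example_io, work);

	example_io_read(io, GFP_NOIO);
}

/* Map-function context: must not block, so try GFP_NOWAIT first and
 * queue the work item only when the pool is exhausted. */
static void example_map_read(struct example_io *io,
			     struct workqueue_struct *wq)
{
	if (example_io_read(io, GFP_NOWAIT)) {
		INIT_WORK(&io->work, example_io_worker);
		queue_work(wq, &io->work);
	}
}

Keeping one submission function parameterized by gfp_t, with failure signalled through the return value, is what lets crypt_map() and the kcryptd worker share a single path in the diff below.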
drivers/md/dm-crypt.c +23 −14
@@ -787,26 +787,30 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
-static void kcryptd_io_read(struct dm_crypt_io *io)
+static void kcryptd_unplug(struct crypt_config *cc)
+{
+	blk_unplug(bdev_get_queue(cc->dev->bdev));
+}
+
+static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
 
-	crypt_inc_pending(io);
-
 	/*
 	 * The block layer might modify the bvec array, so always
 	 * copy the required bvecs because we need the original
 	 * one in order to decrypt the whole bio data *afterwards*.
 	 */
-	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
-	if (unlikely(!clone)) {
-		io->error = -ENOMEM;
-		crypt_dec_pending(io);
-		return;
+	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
+	if (!clone) {
+		kcryptd_unplug(cc);
+		return 1;
 	}
 
+	crypt_inc_pending(io);
+
 	clone_init(io, clone);
 	clone->bi_idx = 0;
 	clone->bi_vcnt = bio_segments(base_bio);
@@ -816,6 +820,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
 	       sizeof(struct bio_vec) * clone->bi_vcnt);
 
 	generic_make_request(clone);
+	return 0;
 }
 
 static void kcryptd_io_write(struct dm_crypt_io *io)
@@ -828,9 +833,12 @@ static void kcryptd_io(struct work_struct *work)
 {
 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
 
-	if (bio_data_dir(io->base_bio) == READ)
-		kcryptd_io_read(io);
-	else
+	if (bio_data_dir(io->base_bio) == READ) {
+		crypt_inc_pending(io);
+		if (kcryptd_io_read(io, GFP_NOIO))
+			io->error = -ENOMEM;
+		crypt_dec_pending(io);
+	} else
 		kcryptd_io_write(io);
 }
 
@@ -1424,9 +1432,10 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 
 	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
 
-	if (bio_data_dir(io->base_bio) == READ)
-		kcryptd_queue_io(io);
-	else
+	if (bio_data_dir(io->base_bio) == READ) {
+		if (kcryptd_io_read(io, GFP_NOWAIT))
+			kcryptd_queue_io(io);
+	} else
 		kcryptd_queue_crypt(io);
 
 	return DM_MAPIO_SUBMITTED;
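
Two failure-path details in the hunks above are easy to miss. When the GFP_NOWAIT allocation fails in crypt_map(), kcryptd_unplug() unplugs the underlying device's queue before the read is handed to the thread; presumably this pushes already-queued requests to the driver so their completions can return bios to the mempool while the deferred read waits. And the GFP_NOIO retry in kcryptd_io() may sleep on the mempool, so in practice it should not fail; the io->error = -ENOMEM branch there is a defensive fallback.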