
Commit cf2f1abf authored by Mikulas Patocka, committed by Mike Snitzer

dm crypt: don't allocate pages for a partial request

Change crypt_alloc_buffer so that it only ever allocates pages for a
full request.  This is a prerequisite for the commit "dm crypt: offload
writes to thread".

This change simplifies the dm-crypt code at the expense of reduced
throughput in low memory conditions (where allocation for a partial
request is most useful).

Note: the next commit ("dm crypt: avoid deadlock in mempools") is needed
to fix a theoretical deadlock.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent f3396c58
+30 −109
drivers/md/dm-crypt.c
@@ -58,7 +58,6 @@ struct dm_crypt_io {
 	atomic_t io_pending;
 	int error;
 	sector_t sector;
-	struct dm_crypt_io *base_io;
 } CRYPTO_MINALIGN_ATTR;

 struct dm_crypt_request {
@@ -172,7 +171,6 @@ struct crypt_config {
 };

 #define MIN_IOS        16
-#define MIN_POOL_PAGES 32

 static struct kmem_cache *_crypt_io_pool;

@@ -946,14 +944,13 @@ static int crypt_convert(struct crypt_config *cc,
 	return 0;
 }

+static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
+
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages, indicated by
- * *out_of_pages set to 1.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
-				      unsigned *out_of_pages)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *clone;
@@ -961,41 +958,27 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
 	unsigned i, len;
 	struct page *page;
+	struct bio_vec *bvec;

 	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 	if (!clone)
 		return NULL;

 	clone_init(io, clone);
-	*out_of_pages = 0;

 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page) {
-			*out_of_pages = 1;
-			break;
-		}
-
-		/*
-		 * If additional pages cannot be allocated without waiting,
-		 * return a partially-allocated bio.  The caller will then try
-		 * to allocate more bios while submitting this partial bio.
-		 */
-		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

 		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

-		if (!bio_add_page(clone, page, len, 0)) {
-			mempool_free(page, cc->page_pool);
-			break;
-		}
+		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
+		bvec->bv_page = page;
+		bvec->bv_len = len;
+		bvec->bv_offset = 0;

-		size -= len;
-	}
+		clone->bi_iter.bi_size += len;

-	if (!clone->bi_iter.bi_size) {
-		bio_put(clone);
-		return NULL;
+		size -= len;
 	}

 	return clone;
@@ -1020,7 +1003,6 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
 	io->base_bio = bio;
 	io->sector = sector;
 	io->error = 0;
-	io->base_io = NULL;
 	io->ctx.req = NULL;
 	atomic_set(&io->io_pending, 0);
 }
@@ -1033,13 +1015,11 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
- * If base_io is set, wait for the last fragment to complete.
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
-	struct dm_crypt_io *base_io = io->base_io;
 	int error = io->error;

 	if (!atomic_dec_and_test(&io->io_pending))
@@ -1050,13 +1030,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
 		mempool_free(io, cc->io_pool);

-	if (likely(!base_io))
-		bio_endio(base_bio, error);
-	else {
-		if (error && !base_io->error)
-			base_io->error = error;
-		crypt_dec_pending(base_io);
-	}
+	bio_endio(base_bio, error);
 }

 /*
@@ -1192,10 +1166,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *clone;
-	struct dm_crypt_io *new_io;
 	int crypt_finished;
-	unsigned out_of_pages = 0;
-	unsigned remaining = io->base_bio->bi_iter.bi_size;
 	sector_t sector = io->sector;
 	int r;

@@ -1205,80 +1176,30 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	crypt_inc_pending(io);
 	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

-	/*
-	 * The allocated buffers can be smaller than the whole bio,
-	 * so repeat the whole process until all the data can be handled.
-	 */
-	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
-		if (unlikely(!clone)) {
-			io->error = -ENOMEM;
-			break;
-		}
+	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+	if (unlikely(!clone)) {
+		io->error = -EIO;
+		goto dec;
+	}

-		io->ctx.bio_out = clone;
-		io->ctx.iter_out = clone->bi_iter;
+	io->ctx.bio_out = clone;
+	io->ctx.iter_out = clone->bi_iter;

-		remaining -= clone->bi_iter.bi_size;
-		sector += bio_sectors(clone);
+	sector += bio_sectors(clone);

-		crypt_inc_pending(io);
+	crypt_inc_pending(io);

-		r = crypt_convert(cc, &io->ctx);
-		if (r < 0)
-			io->error = -EIO;
+	r = crypt_convert(cc, &io->ctx);
+	if (r)
+		io->error = -EIO;

-		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
+	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

-		/* Encryption was already finished, submit io now */
-		if (crypt_finished) {
-			kcryptd_crypt_write_io_submit(io, 0);
-
-			/*
-			 * If there was an error, do not try next fragments.
-			 * For async, error is processed in async handler.
-			 */
-			if (unlikely(r < 0))
-				break;
-
-			io->sector = sector;
-		}
-
-		/*
-		 * Out of memory -> run queues
-		 * But don't wait if split was due to the io size restriction
-		 */
-		if (unlikely(out_of_pages))
-			congestion_wait(BLK_RW_ASYNC, HZ/100);
-
-		/*
-		 * With async crypto it is unsafe to share the crypto context
-		 * between fragments, so switch to a new dm_crypt_io structure.
-		 */
-		if (unlikely(!crypt_finished && remaining)) {
-			new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
-			crypt_io_init(new_io, io->cc, io->base_bio, sector);
-			crypt_inc_pending(new_io);
-			crypt_convert_init(cc, &new_io->ctx, NULL,
-					   io->base_bio, sector);
-			new_io->ctx.iter_in = io->ctx.iter_in;
-
-			/*
-			 * Fragments after the first use the base_io
-			 * pending count.
-			 */
-			if (!io->base_io)
-				new_io->base_io = io;
-			else {
-				new_io->base_io = io->base_io;
-				crypt_inc_pending(io->base_io);
-				crypt_dec_pending(io);
-			}
-
-			io = new_io;
-		}
-	}
+	/* Encryption was already finished, submit io now */
+	if (crypt_finished) {
+		kcryptd_crypt_write_io_submit(io, 0);
+		io->sector = sector;
+	}

+dec:
 	crypt_dec_pending(io);
 }

@@ -1746,7 +1667,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
 		      ARCH_KMALLOC_MINALIGN);

-	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
+	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = "Cannot allocate page mempool";
 		goto bad;