
Commit f97380bc authored by Olaf Kirch, committed by Linus Torvalds

dm crypt: use smaller bvecs in clones



Allocate smaller clones

With the previous dm-crypt fixes, there is no need for the clone bios to have
the same bvec size as the original - we just need to make them big enough for
the remaining number of pages.  The only requirement is that we clear the
"out" index in convert_context, so that crypt_convert starts storing data at
the right position within the clone bio.

Signed-off-by: Olaf Kirch <olaf.kirch@oracle.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2f9941b6
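
To make the pattern concrete, here is a small, self-contained userspace sketch of the same idea (illustration only: process(), CHUNK_MAX and the toy XOR "cipher" are invented for this example and are not part of the patch). Each output chunk is allocated only for the bytes still remaining, and its output index restarts at zero for every chunk, mirroring what the smaller clones from crypt_alloc_buffer() and the cleared ctx.idx_out achieve in the diff below.

/* Userspace illustration only -- not kernel code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHUNK_MAX 4096	/* stand-in for the maximum clone size */

static int process(const unsigned char *src, size_t len)
{
	size_t done = 0;

	while (done < len) {
		size_t remaining = len - done;
		/* allocate only what is still needed, like the smaller clone */
		size_t chunk = remaining < CHUNK_MAX ? remaining : CHUNK_MAX;
		unsigned char *out = malloc(chunk);
		size_t idx_out = 0;	/* reset for every new chunk, like ctx.idx_out */

		if (!out)
			return -1;

		while (idx_out < chunk) {	/* toy "encryption" step */
			out[idx_out] = src[done + idx_out] ^ 0xff;
			idx_out++;
		}

		/* here the kernel would submit the clone bio */
		done += chunk;
		free(out);
	}
	return 0;
}

int main(void)
{
	unsigned char buf[3 * CHUNK_MAX + 123];

	memset(buf, 0xab, sizeof(buf));
	if (process(buf, sizeof(buf)) == 0)
		printf("processed %zu bytes in chunks of at most %d bytes\n",
		       sizeof(buf), CHUNK_MAX);
	return 0;
}
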
drivers/md/dm-crypt.c +8 −21
@@ -379,8 +379,7 @@ static int crypt_convert(struct crypt_config *cc,
  * This should never violate the device limitations
  * May return a smaller bio when running out of pages
  */
-static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
-				      unsigned int *bio_vec_idx)
+static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
@@ -394,16 +393,7 @@ static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,

 	clone_init(io, clone);
 
-	/* if the last bio was not complete, continue where that one ended */
-	clone->bi_idx = *bio_vec_idx;
-	clone->bi_vcnt = *bio_vec_idx;
-	clone->bi_size = 0;
-	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
-
-	/* clone->bi_idx pages have already been allocated */
-	size -= clone->bi_idx * PAGE_SIZE;
-
-	for (i = clone->bi_idx; i < nr_iovecs; i++) {
+	for (i = 0; i < nr_iovecs; i++) {
 		struct bio_vec *bv = bio_iovec_idx(clone, i);
 
 		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
@@ -415,7 +405,7 @@ static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
 		 * return a partially allocated bio, the caller will then try
 		 * to allocate additional bios while submitting this partial bio
 		 */
-		if ((i - clone->bi_idx) == (MIN_BIO_PAGES - 1))
+		if (i == (MIN_BIO_PAGES - 1))
 			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
 		bv->bv_offset = 0;
@@ -434,12 +424,6 @@ static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
 		return NULL;
 	}
 
-	/*
-	 * Remember the last bio_vec allocated to be able
-	 * to correctly continue after the splitting.
-	 */
-	*bio_vec_idx = clone->bi_vcnt;
-
 	return clone;
 }
 
@@ -597,7 +581,6 @@ static void process_write(struct crypt_io *io)
 	struct convert_context ctx;
 	unsigned remaining = base_bio->bi_size;
 	sector_t sector = base_bio->bi_sector - io->target->begin;
-	unsigned bvec_idx = 0;
 
 	atomic_inc(&io->pending);
 
@@ -608,13 +591,14 @@ static void process_write(struct crypt_io *io)
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(io, base_bio->bi_size, &bvec_idx);
+		clone = crypt_alloc_buffer(io, remaining);
 		if (unlikely(!clone)) {
 			dec_pending(io, -ENOMEM);
 			return;
 		}
 
 		ctx.bio_out = clone;
+		ctx.idx_out = 0;
 
 		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
 			crypt_free_buffer_pages(cc, clone, clone->bi_size);
@@ -623,6 +607,9 @@ static void process_write(struct crypt_io *io)
 			return;
 		}
 
+		/* crypt_convert should have filled the clone bio */
+		BUG_ON(ctx.idx_out < clone->bi_vcnt);
+
 		clone->bi_sector = cc->start + sector;
 		remaining -= clone->bi_size;
 		sector += bio_sectors(clone);