Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dec1cedf authored by Milan Broz, committed by Alasdair G Kergon
Browse files

dm crypt: abstract crypt_write_done



Process write request in separate function and queue
final bio through io workqueue.

Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent 0c395b0f
Loading
Loading
Loading
Loading
+25 −16
Original line number Original line Diff line number Diff line
@@ -577,18 +577,34 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)


static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int error)
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int error)
{
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;
	io->sector += bio_sectors(clone);
}
}


static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
{
	struct crypt_config *cc = io->target->private;
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	struct bio *clone;
	unsigned remaining = base_bio->bi_size;
	unsigned remaining = io->base_bio->bi_size;
	int r;


	atomic_inc(&io->pending);
	atomic_inc(&io->pending);


	crypt_convert_init(cc, &io->ctx, NULL, base_bio, io->sector);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);


	/*
	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * The allocated buffers can be smaller than the whole bio,
@@ -605,20 +621,13 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
		io->ctx.bio_out = clone;
		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;
		io->ctx.idx_out = 0;


		if (unlikely(crypt_convert(cc, &io->ctx) < 0)) {
		remaining -= clone->bi_size;
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			io->error = -EIO;
			crypt_dec_pending(io);
			return;
		}


		/* crypt_convert should have filled the clone bio */
		r = crypt_convert(cc, &io->ctx);
		BUG_ON(io->ctx.idx_out < clone->bi_vcnt);


		clone->bi_sector = cc->start + io->sector;
		kcryptd_crypt_write_io_submit(io, r);
		remaining -= clone->bi_size;
		if (unlikely(r < 0))
		io->sector += bio_sectors(clone);
			return;


		/* Grab another reference to the io struct
		/* Grab another reference to the io struct
		 * before we kick off the request */
		 * before we kick off the request */
@@ -631,7 +640,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
		 * may be gone already. */
		 * may be gone already. */


		/* out of memory -> run queues */
		/* out of memory -> run queues */
		if (remaining)
		if (unlikely(remaining))
			congestion_wait(WRITE, HZ/100);
			congestion_wait(WRITE, HZ/100);
	}
	}
}
}