Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8a2d5286 authored by Mike Snitzer, committed by Alasdair G Kergon
Browse files

dm snapshot: merge consecutive chunks together



s->store->type->prepare_merge returns the number of chunks that can be
copied linearly working backwards from the returned chunk number.

For example, if it returns 3 chunks with old_chunk == 10 and new_chunk
== 20, then chunk 20 can be copied to 10, chunk 19 to 9 and 18 to 8.

Until now kcopyd only copied one chunk at a time.  This patch now copies
the full set at once.

Consequently, snapshot_merge_process() needs to delay the merging of all
chunks if any have writes in progress, not just the first chunk in the
region that is to be merged.

snapshot-merge's performance is now comparable to the original
snapshot-origin target.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent 73dfd078
Loading
Loading
Loading
Loading
+21 −10
Original line number Original line Diff line number Diff line
@@ -879,9 +879,10 @@ static void increment_pending_exceptions_done_count(void)


static void snapshot_merge_next_chunks(struct dm_snapshot *s)
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
{
	int r;
	int i, linear_chunks;
	chunk_t old_chunk, new_chunk;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	struct dm_io_region src, dest;
	sector_t io_size;
	uint64_t previous_count;
	uint64_t previous_count;


	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
@@ -896,20 +897,28 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
		goto shut;
		goto shut;
	}
	}


	r = s->store->type->prepare_merge(s->store, &old_chunk, &new_chunk);
	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
	if (r <= 0) {
						      &new_chunk);
		if (r < 0)
	if (linear_chunks <= 0) {
		if (linear_chunks < 0)
			DMERR("Read error in exception store: "
			DMERR("Read error in exception store: "
			      "shutting down merge");
			      "shutting down merge");
		goto shut;
		goto shut;
	}
	}


	/* TODO: use larger I/O size once we verify that kcopyd handles it */
	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;


	dest.bdev = s->origin->bdev;
	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min((sector_t)s->store->chunk_size,
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
			 get_dev_size(dest.bdev) - dest.sector);


	src.bdev = s->cow->bdev;
	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.sector = chunk_to_sector(s->store, new_chunk);
@@ -925,7 +934,7 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
	 * significant impact on performance.
	 * significant impact on performance.
	 */
	 */
	previous_count = read_pending_exceptions_done_count();
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, s->store->chunk_size)) {
	while (origin_write_extent(s, dest.sector, io_size)) {
		wait_event(_pending_exceptions_done,
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			   (read_pending_exceptions_done_count() !=
			    previous_count));
			    previous_count));
@@ -935,10 +944,12 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)


	down_write(&s->lock);
	down_write(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = 1;
	s->num_merging_chunks = linear_chunks;
	up_write(&s->lock);
	up_write(&s->lock);


	__check_for_conflicting_io(s, old_chunk);
	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);


	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;
	return;