Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a59db676 authored by Joe Thornber's avatar Joe Thornber Committed by Mike Snitzer
Browse files

dm cache: fix problematic dual use of a single migration count variable



Introduce a new variable to count the number of allocated migration
structures.  The existing variable cache->nr_migrations became
overloaded.  It was used to:

 i) track the number of migrations in flight for the purposes of
    quiescing during suspend.

 ii) estimate the amount of background IO occurring.

Recent discard changes meant that REQ_DISCARD bios are processed with
a migration.  Discards are not background IO, so nr_migrations was not
incremented.  However, this could cause quiescing to complete early.

(i) is now handled with a new variable cache->nr_allocated_migrations.
cache->nr_migrations has been renamed cache->nr_io_migrations.
cleanup_migration() is now called free_io_migration(), since it
decrements that variable.

Also, remove the unused cache->next_migration variable that was replaced
with prealloc_structs a while ago.

Signed-off-by: default avatarJoe Thornber <ejt@redhat.com>
Signed-off-by: default avatarMike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org
parent 9b1cc9f2
Loading
Loading
Loading
Loading
+50 −39
Original line number Original line Diff line number Diff line
@@ -221,7 +221,13 @@ struct cache {
	struct list_head need_commit_migrations;
	struct list_head need_commit_migrations;
	sector_t migration_threshold;
	sector_t migration_threshold;
	wait_queue_head_t migration_wait;
	wait_queue_head_t migration_wait;
	atomic_t nr_migrations;
	atomic_t nr_allocated_migrations;

	/*
	 * The number of in flight migrations that are performing
	 * background io. eg, promotion, writeback.
	 */
	atomic_t nr_io_migrations;


	wait_queue_head_t quiescing_wait;
	wait_queue_head_t quiescing_wait;
	atomic_t quiescing;
	atomic_t quiescing;
@@ -258,7 +264,6 @@ struct cache {
	struct dm_deferred_set *all_io_ds;
	struct dm_deferred_set *all_io_ds;


	mempool_t *migration_pool;
	mempool_t *migration_pool;
	struct dm_cache_migration *next_migration;


	struct dm_cache_policy *policy;
	struct dm_cache_policy *policy;
	unsigned policy_nr_args;
	unsigned policy_nr_args;
@@ -350,10 +355,31 @@ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cel
	dm_bio_prison_free_cell(cache->prison, cell);
	dm_bio_prison_free_cell(cache->prison, cell);
}
}


static struct dm_cache_migration *alloc_migration(struct cache *cache)
{
	struct dm_cache_migration *mg;

	mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
	if (mg) {
		mg->cache = cache;
		atomic_inc(&mg->cache->nr_allocated_migrations);
	}

	return mg;
}

static void free_migration(struct dm_cache_migration *mg)
{
	if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
		wake_up(&mg->cache->migration_wait);

	mempool_free(mg, mg->cache->migration_pool);
}

static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
{
{
	if (!p->mg) {
	if (!p->mg) {
		p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
		p->mg = alloc_migration(cache);
		if (!p->mg)
		if (!p->mg)
			return -ENOMEM;
			return -ENOMEM;
	}
	}
@@ -382,7 +408,7 @@ static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
		free_prison_cell(cache, p->cell1);
		free_prison_cell(cache, p->cell1);


	if (p->mg)
	if (p->mg)
		mempool_free(p->mg, cache->migration_pool);
		free_migration(p->mg);
}
}


static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
@@ -854,24 +880,14 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
 * Migration covers moving data from the origin device to the cache, or
 * Migration covers moving data from the origin device to the cache, or
 * vice versa.
 * vice versa.
 *--------------------------------------------------------------*/
 *--------------------------------------------------------------*/
static void free_migration(struct dm_cache_migration *mg)
static void inc_io_migrations(struct cache *cache)
{
	mempool_free(mg, mg->cache->migration_pool);
}

static void inc_nr_migrations(struct cache *cache)
{
{
	atomic_inc(&cache->nr_migrations);
	atomic_inc(&cache->nr_io_migrations);
}
}


static void dec_nr_migrations(struct cache *cache)
static void dec_io_migrations(struct cache *cache)
{
{
	atomic_dec(&cache->nr_migrations);
	atomic_dec(&cache->nr_io_migrations);

	/*
	 * Wake the worker in case we're suspending the target.
	 */
	wake_up(&cache->migration_wait);
}
}


static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
@@ -894,11 +910,10 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
	wake_worker(cache);
	wake_worker(cache);
}
}


static void cleanup_migration(struct dm_cache_migration *mg)
static void free_io_migration(struct dm_cache_migration *mg)
{
{
	struct cache *cache = mg->cache;
	dec_io_migrations(mg->cache);
	free_migration(mg);
	free_migration(mg);
	dec_nr_migrations(cache);
}
}


static void migration_failure(struct dm_cache_migration *mg)
static void migration_failure(struct dm_cache_migration *mg)
@@ -923,7 +938,7 @@ static void migration_failure(struct dm_cache_migration *mg)
		cell_defer(cache, mg->new_ocell, true);
		cell_defer(cache, mg->new_ocell, true);
	}
	}


	cleanup_migration(mg);
	free_io_migration(mg);
}
}


static void migration_success_pre_commit(struct dm_cache_migration *mg)
static void migration_success_pre_commit(struct dm_cache_migration *mg)
@@ -934,7 +949,7 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
	if (mg->writeback) {
	if (mg->writeback) {
		clear_dirty(cache, mg->old_oblock, mg->cblock);
		clear_dirty(cache, mg->old_oblock, mg->cblock);
		cell_defer(cache, mg->old_ocell, false);
		cell_defer(cache, mg->old_ocell, false);
		cleanup_migration(mg);
		free_io_migration(mg);
		return;
		return;


	} else if (mg->demote) {
	} else if (mg->demote) {
@@ -944,14 +959,14 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
					     mg->old_oblock);
					     mg->old_oblock);
			if (mg->promote)
			if (mg->promote)
				cell_defer(cache, mg->new_ocell, true);
				cell_defer(cache, mg->new_ocell, true);
			cleanup_migration(mg);
			free_io_migration(mg);
			return;
			return;
		}
		}
	} else {
	} else {
		if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
		if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
			DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
			DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
			policy_remove_mapping(cache->policy, mg->new_oblock);
			policy_remove_mapping(cache->policy, mg->new_oblock);
			cleanup_migration(mg);
			free_io_migration(mg);
			return;
			return;
		}
		}
	}
	}
@@ -984,7 +999,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
		} else {
		} else {
			if (mg->invalidate)
			if (mg->invalidate)
				policy_remove_mapping(cache->policy, mg->old_oblock);
				policy_remove_mapping(cache->policy, mg->old_oblock);
			cleanup_migration(mg);
			free_io_migration(mg);
		}
		}


	} else {
	} else {
@@ -999,7 +1014,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
			bio_endio(mg->new_ocell->holder, 0);
			bio_endio(mg->new_ocell->holder, 0);
			cell_defer(cache, mg->new_ocell, false);
			cell_defer(cache, mg->new_ocell, false);
		}
		}
		cleanup_migration(mg);
		free_io_migration(mg);
	}
	}
}
}


@@ -1251,7 +1266,7 @@ static void promote(struct cache *cache, struct prealloc *structs,
	mg->new_ocell = cell;
	mg->new_ocell = cell;
	mg->start_jiffies = jiffies;
	mg->start_jiffies = jiffies;


	inc_nr_migrations(cache);
	inc_io_migrations(cache);
	quiesce_migration(mg);
	quiesce_migration(mg);
}
}


@@ -1275,7 +1290,7 @@ static void writeback(struct cache *cache, struct prealloc *structs,
	mg->new_ocell = NULL;
	mg->new_ocell = NULL;
	mg->start_jiffies = jiffies;
	mg->start_jiffies = jiffies;


	inc_nr_migrations(cache);
	inc_io_migrations(cache);
	quiesce_migration(mg);
	quiesce_migration(mg);
}
}


@@ -1302,7 +1317,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
	mg->new_ocell = new_ocell;
	mg->new_ocell = new_ocell;
	mg->start_jiffies = jiffies;
	mg->start_jiffies = jiffies;


	inc_nr_migrations(cache);
	inc_io_migrations(cache);
	quiesce_migration(mg);
	quiesce_migration(mg);
}
}


@@ -1330,7 +1345,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs,
	mg->new_ocell = NULL;
	mg->new_ocell = NULL;
	mg->start_jiffies = jiffies;
	mg->start_jiffies = jiffies;


	inc_nr_migrations(cache);
	inc_io_migrations(cache);
	quiesce_migration(mg);
	quiesce_migration(mg);
}
}


@@ -1412,7 +1427,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs,


static bool spare_migration_bandwidth(struct cache *cache)
static bool spare_migration_bandwidth(struct cache *cache)
{
{
	sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
	sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
		cache->sectors_per_block;
		cache->sectors_per_block;
	return current_volume < cache->migration_threshold;
	return current_volume < cache->migration_threshold;
}
}
@@ -1764,7 +1779,7 @@ static void stop_quiescing(struct cache *cache)


static void wait_for_migrations(struct cache *cache)
static void wait_for_migrations(struct cache *cache)
{
{
	wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
	wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
}
}


static void stop_worker(struct cache *cache)
static void stop_worker(struct cache *cache)
@@ -1876,9 +1891,6 @@ static void destroy(struct cache *cache)
{
{
	unsigned i;
	unsigned i;


	if (cache->next_migration)
		mempool_free(cache->next_migration, cache->migration_pool);

	if (cache->migration_pool)
	if (cache->migration_pool)
		mempool_destroy(cache->migration_pool);
		mempool_destroy(cache->migration_pool);


@@ -2424,7 +2436,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
	INIT_LIST_HEAD(&cache->quiesced_migrations);
	INIT_LIST_HEAD(&cache->quiesced_migrations);
	INIT_LIST_HEAD(&cache->completed_migrations);
	INIT_LIST_HEAD(&cache->completed_migrations);
	INIT_LIST_HEAD(&cache->need_commit_migrations);
	INIT_LIST_HEAD(&cache->need_commit_migrations);
	atomic_set(&cache->nr_migrations, 0);
	atomic_set(&cache->nr_allocated_migrations, 0);
	atomic_set(&cache->nr_io_migrations, 0);
	init_waitqueue_head(&cache->migration_wait);
	init_waitqueue_head(&cache->migration_wait);


	init_waitqueue_head(&cache->quiescing_wait);
	init_waitqueue_head(&cache->quiescing_wait);
@@ -2487,8 +2500,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
		goto bad;
		goto bad;
	}
	}


	cache->next_migration = NULL;

	cache->need_tick_bio = true;
	cache->need_tick_bio = true;
	cache->sized = false;
	cache->sized = false;
	cache->invalidate = false;
	cache->invalidate = false;