Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e49c84c5 authored by Mikulas Patocka, committed by Greg Kroah-Hartman
Browse files

dm cache: fix bugs when a GFP_NOWAIT allocation fails



commit 13bd677a472d534bf100bab2713efc3f9e3f5978 upstream.

GFP_NOWAIT allocation can fail anytime - it doesn't wait for memory being
available and it fails if the mempool is exhausted and there is not enough
memory.

If we go down this path:
  map_bio -> mg_start -> alloc_migration -> mempool_alloc(GFP_NOWAIT)
we can see that map_bio() doesn't check the return value of mg_start(),
and the bio is leaked.

If we go down this path:
  map_bio -> mg_start -> mg_lock_writes -> alloc_prison_cell ->
  dm_bio_prison_alloc_cell_v2 -> mempool_alloc(GFP_NOWAIT) ->
  mg_lock_writes -> mg_complete
the bio is ended with an error - it is unacceptable because it could
cause filesystem corruption if the machine ran out of memory
temporarily.

Change GFP_NOWAIT to GFP_NOIO, so that the mempool code will properly
wait until memory becomes available. mempool_alloc with GFP_NOIO can't
fail, so remove the code paths that deal with allocation failure.

Cc: stable@vger.kernel.org
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 5ce7528c
Loading
Loading
Loading
Loading
+2 −26
Original line number Diff line number Diff line
@@ -541,7 +541,7 @@ static void wake_migration_worker(struct cache *cache)

/*
 * Allocate a v2 bio-prison cell for this cache's prison.
 *
 * Uses GFP_NOIO rather than GFP_NOWAIT so that mempool_alloc() inside
 * dm_bio_prison_alloc_cell_v2() sleeps until memory is available instead
 * of failing; with GFP_NOIO a mempool allocation cannot return NULL, so
 * callers need no failure path.  (Fix for upstream commit
 * 13bd677a472d: GFP_NOWAIT failures leaked bios or errored them,
 * risking filesystem corruption under temporary memory pressure.)
 */
static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
{
	return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
}

static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
@@ -553,9 +553,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
{
	struct dm_cache_migration *mg;

	mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
	if (!mg)
		return NULL;
	mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);

	memset(mg, 0, sizeof(*mg));

@@ -663,10 +661,6 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
	struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;

	cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
	if (!cell_prealloc) {
		defer_bio(cache, bio);
		return false;
	}

	build_key(oblock, end, &key);
	r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
@@ -1492,11 +1486,6 @@ static int mg_lock_writes(struct dm_cache_migration *mg)
	struct dm_bio_prison_cell_v2 *prealloc;

	prealloc = alloc_prison_cell(cache);
	if (!prealloc) {
		DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache));
		mg_complete(mg, false);
		return -ENOMEM;
	}

	/*
	 * Prevent writes to the block, but allow reads to continue.
@@ -1534,11 +1523,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
	}

	mg = alloc_migration(cache);
	if (!mg) {
		policy_complete_background_work(cache->policy, op, false);
		background_work_end(cache);
		return -ENOMEM;
	}

	mg->op = op;
	mg->overwrite_bio = bio;
@@ -1627,10 +1611,6 @@ static int invalidate_lock(struct dm_cache_migration *mg)
	struct dm_bio_prison_cell_v2 *prealloc;

	prealloc = alloc_prison_cell(cache);
	if (!prealloc) {
		invalidate_complete(mg, false);
		return -ENOMEM;
	}

	build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
	r = dm_cell_lock_v2(cache->prison, &key,
@@ -1668,10 +1648,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
		return -EPERM;

	mg = alloc_migration(cache);
	if (!mg) {
		background_work_end(cache);
		return -ENOMEM;
	}

	mg->overwrite_bio = bio;
	mg->invalidate_cblock = cblock;