
Commit 6cf4cc8f authored by Joe Thornber, committed by Mike Snitzer

dm cache policy smq: stop preemptively demoting blocks



Preemptively demoting blocks causes a lot of churn when the working set's
size is close to the fast device's size.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 4d44ec5a
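
To see the churn the message refers to, here is a standalone toy model (not
kernel code; the constants, the round-robin access pattern and the victim
scan below are all invented for illustration): a cache whose working set
nearly fills it, driven by a policy that keeps demoting blocks whenever the
free target is unmet. Every demoted block is still hot, so it is promoted
straight back on its next access.

/* churn.c - toy model of preemptive demotion churn (illustration only) */
#include <stdbool.h>
#include <stdio.h>

#define CACHE_SIZE	100	/* slots on the fast device */
#define WS_SIZE		98	/* hot blocks: working set ~ cache size */
#define FREE_TARGET	25	/* percentage of slots to keep free */
#define ACCESSES	10000

static bool cached[WS_SIZE];	/* is hot block i resident? */
static int nr_allocated;

/* Same shape as free_target_met(): are enough slots free? */
static bool free_target_met(void)
{
	int nr_free = CACHE_SIZE - nr_allocated;

	return nr_free >= CACHE_SIZE * FREE_TARGET / 100;
}

int main(void)
{
	long promotions = 0, demotions = 0;
	int victim = 0;

	for (int i = 0; i < ACCESSES; i++) {
		int block = i % WS_SIZE;	/* round-robin over the hot set */

		if (!cached[block]) {		/* miss: promote the block */
			cached[block] = true;
			nr_allocated++;
			promotions++;
		}

		/* Old behaviour: demote whenever the free target is unmet. */
		while (!free_target_met()) {
			while (!cached[victim])	/* find any resident block */
				victim = (victim + 1) % WS_SIZE;
			cached[victim] = false;	/* it is still hot, so it */
			nr_allocated--;		/* will be promoted right back */
			demotions++;
		}
	}

	printf("promotions=%ld demotions=%ld\n", promotions, demotions);
	return 0;
}

In this setup roughly a quarter of all accesses turn into a demotion plus a
re-promotion. Without the free-target demotion loop, the same workload
settles with the working set resident and stops migrating entirely.
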
+5 −12
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1134,13 +1134,10 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 		percent_to_target(mq, CLEAN_TARGET);
 }
 
-static bool free_target_met(struct smq_policy *mq, bool idle)
+static bool free_target_met(struct smq_policy *mq)
 {
 	unsigned nr_free;
 
-	if (!idle)
-		return true;
-
 	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
 	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
 		percent_to_target(mq, FREE_TARGET);
@@ -1220,7 +1217,7 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 		 * We always claim to be 'idle' to ensure some demotions happen
 		 * with continuous loads.
 		 */
-		if (!free_target_met(mq, true))
+		if (!free_target_met(mq))
			queue_demotion(mq);
 		return;
 	}
@@ -1421,15 +1418,11 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
 	spin_lock_irqsave(&mq->lock, flags);
 	r = btracker_issue(mq->bg_work, result);
 	if (r == -ENODATA) {
-		/* find some writeback work to do */
-		if (mq->migrations_allowed && !free_target_met(mq, idle))
-			queue_demotion(mq);
-
-		else if (!clean_target_met(mq, idle))
+		if (!clean_target_met(mq, idle)) {
 			queue_writeback(mq);
-
-		r = btracker_issue(mq->bg_work, result);
+			r = btracker_issue(mq->bg_work, result);
+		}
 	}
 	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
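
After the last hunk, smq_get_background_work() no longer queues demotions at
all; demotion now happens only from the promotion path, when a promotion
actually needs space. The remaining writeback branch also retries
btracker_issue() so the freshly queued writeback is handed back in the same
call. Below is a compilable sketch of that retry shape, with every type and
helper replaced by a stand-in (struct tracker, struct work and the toy
bodies are assumptions for illustration, not the kernel's implementation).

/* bg_work.c - shape of the post-commit background-work path (stand-ins) */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct tracker { int queued; };		/* stand-in for mq->bg_work */
struct work { const char *kind; };

static struct work wb = { "writeback" };

static int btracker_issue(struct tracker *t, struct work **result)
{
	if (!t->queued)
		return -ENODATA;	/* nothing queued yet */
	t->queued--;
	*result = &wb;
	return 0;
}

static bool clean_target_met(bool idle)
{
	(void)idle;
	return false;			/* pretend dirty blocks remain */
}

static void queue_writeback(struct tracker *t)
{
	t->queued++;
}

static int get_background_work(struct tracker *t, bool idle, struct work **result)
{
	int r = btracker_issue(t, result);

	if (r == -ENODATA) {
		if (!clean_target_met(idle)) {
			queue_writeback(t);
			/* retry: hand the new work back in this same call */
			r = btracker_issue(t, result);
		}
	}
	return r;
}

int main(void)
{
	struct tracker t = { 0 };
	struct work *w;

	if (!get_background_work(&t, true, &w))
		printf("got %s work without a second poll\n", w->kind);
	return 0;
}

The point of the retry is that the caller would otherwise see -ENODATA and
have to poll again even though writeback work was queued a moment earlier.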