
Commit daec338b authored by Mike Snitzer

dm thin: add mappings to end of prepared_* lists



Mappings could be processed in descending logical block order,
particularly if buffered IO is used.  This could adversely affect the
latency of IO processing.  Fix this by adding mappings to the end of the
'prepared_mappings' and 'prepared_discards' lists.
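
The ordering argument is easiest to see with the list primitives themselves. Below is a minimal userspace sketch (not the kernel's <linux/list.h>, just a re-implementation with the same semantics; the struct mapping type, its block field and the list names are illustrative, not dm-thin's dm_thin_new_mapping): entries queued with list_add() come back to the worker in reverse (LIFO) order, while list_add_tail() preserves submission (FIFO) order.

#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void list_init(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

static void __list_add(struct list_head *new, struct list_head *prev,
		       struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/* Insert at the head: the worker walks entries in reverse (LIFO) order. */
static void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/* Insert at the tail: the worker walks entries in submission (FIFO) order. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

/* Illustrative stand-in for a prepared mapping; not dm_thin_new_mapping. */
struct mapping {
	int block;
	struct list_head list;
};

static void dump(const char *label, struct list_head *head)
{
	struct list_head *pos;

	printf("%s:", label);
	for (pos = head->next; pos != head; pos = pos->next) {
		struct mapping *m = (struct mapping *)
			((char *)pos - offsetof(struct mapping, list));
		printf(" %d", m->block);
	}
	printf("\n");
}

int main(void)
{
	struct mapping a[3] = { { .block = 0 }, { .block = 1 }, { .block = 2 } };
	struct mapping b[3] = { { .block = 0 }, { .block = 1 }, { .block = 2 } };
	struct list_head prepared_add, prepared_tail;
	int i;

	list_init(&prepared_add);
	list_init(&prepared_tail);

	/* Queue three mappings for logical blocks 0, 1, 2 in that order. */
	for (i = 0; i < 3; i++) {
		list_add(&a[i].list, &prepared_add);       /* old behaviour */
		list_add_tail(&b[i].list, &prepared_tail); /* patched behaviour */
	}

	dump("list_add      -> worker sees", &prepared_add);  /* 2 1 0 */
	dump("list_add_tail -> worker sees", &prepared_tail); /* 0 1 2 */
	return 0;
}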

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Joe Thornber <ejt@redhat.com>
parent 8d30abff
drivers/md/dm-thin.c  +3 −3
@@ -535,7 +535,7 @@ static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
 	struct pool *pool = m->tc->pool;
 
 	if (m->quiesced && m->prepared) {
-		list_add(&m->list, &pool->prepared_mappings);
+		list_add_tail(&m->list, &pool->prepared_mappings);
 		wake_worker(pool);
 	}
 }
@@ -1058,7 +1058,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 
 			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
 				spin_lock_irqsave(&pool->lock, flags);
-				list_add(&m->list, &pool->prepared_discards);
+				list_add_tail(&m->list, &pool->prepared_discards);
 				spin_unlock_irqrestore(&pool->lock, flags);
 				wake_worker(pool);
 			}
@@ -2919,7 +2919,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
 		if (!list_empty(&work)) {
 			spin_lock_irqsave(&pool->lock, flags);
 			list_for_each_entry_safe(m, tmp, &work, list)
-				list_add(&m->list, &pool->prepared_discards);
+				list_add_tail(&m->list, &pool->prepared_discards);
 			spin_unlock_irqrestore(&pool->lock, flags);
 			wake_worker(pool);
 		}