
Commit 6ecf23af authored by Tejun Heo, committed by Jens Axboe

block: extend queue bypassing to cover blkcg policies



Extend queue bypassing such that dying queue is always bypassing and
blk-throttle is drained on bypass.  With blkcg policies updated to
test blk_queue_bypass() instead of blk_queue_dead(), this ensures that
no bio or request is held by or going through blkcg policies on a
bypassing queue.

This will be used to implement blkg cleanup on elevator switches and
policy changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d732580b
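
For context, the check the message describes looks roughly like the following sketch in a blkcg policy's lookup path. This is illustrative only: struct my_group and my_policy_get_group() are hypothetical names, not kernel API; only blk_queue_bypass(), blk_queue_dead(), and unlikely() come from the kernel headers of this era.

#include <linux/blkdev.h>

struct my_group { int weight; };

/* Sketch: gate new blkcg work on bypass rather than on queue death. */
static struct my_group *my_policy_get_group(struct request_queue *q)
{
	/*
	 * A dying queue is always bypassing, so this single test covers
	 * blk_queue_dead() and, in addition, temporary bypass windows
	 * such as an elevator switch or a policy change.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return NULL;

	/* ... normal lookup or creation of the per-cgroup group ... */
	return NULL;
}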
block/blk-core.c: +8 −4
@@ -372,7 +372,6 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 		if (q->elevator)
 			elv_drain_elevator(q);
 
-		if (drain_all)
-			blk_throtl_drain(q);
+		blk_throtl_drain(q);
 
 		/*
@@ -415,8 +414,8 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
  *
  * In bypass mode, only the dispatch FIFO queue of @q is used.  This
  * function makes @q enter bypass mode and drains all requests which were
- * issued before.  On return, it's guaranteed that no request has ELVPRIV
- * set.
+ * throttled or issued before.  On return, it's guaranteed that no request
+ * is being throttled or has ELVPRIV set.
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
@@ -461,6 +460,11 @@ void blk_cleanup_queue(struct request_queue *q)
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 
 	spin_lock_irq(lock);
+
+	/* dead queue is permanently in bypass mode till released */
+	q->bypass_depth++;
+	queue_flag_set(QUEUE_FLAG_BYPASS, q);
+
 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
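
The permanent bypass added to blk_cleanup_queue() above reuses the temporary bracket introduced by the parent commit. A minimal sketch of how a later caller (e.g. an elevator switch, per the commit message) would use that bracket; do_reconfigure() is a hypothetical stand-in, while blk_queue_bypass_start() and blk_queue_bypass_end() are the real helpers from this series:

#include <linux/blkdev.h>

static void do_reconfigure(struct request_queue *q)
{
	/* hypothetical: switch elevators or update a blkcg policy */
}

static void reconfigure_queue(struct request_queue *q)
{
	/* after this returns, no request is throttled or has ELVPRIV set */
	blk_queue_bypass_start(q);

	/* no bio or request is held by blkcg policies in this window */
	do_reconfigure(q);

	blk_queue_bypass_end(q);
}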
block/blk-throttle.c: +2 −2
@@ -310,7 +310,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	struct request_queue *q = td->queue;
 
 	/* no throttling for dead queue */
-	if (unlikely(blk_queue_dead(q)))
+	if (unlikely(blk_queue_bypass(q)))
 		return NULL;
 
 	rcu_read_lock();
@@ -335,7 +335,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	spin_lock_irq(q->queue_lock);
 
 	/* Make sure @q is still alive */
-	if (unlikely(blk_queue_dead(q))) {
+	if (unlikely(blk_queue_bypass(q))) {
 		kfree(tg);
 		return NULL;
 	}
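
The second hunk guards the drop-lock/allocate/recheck pattern: the queue lock is released around the allocation, so the queue may have entered bypass in the meantime and the state must be re-tested before the new group is used. A sketch of that pattern, with struct my_grp and get_group_slowpath() as hypothetical stand-ins for struct throtl_grp and throtl_get_tg():

#include <linux/blkdev.h>
#include <linux/slab.h>

struct my_grp { int id; };

static struct my_grp *get_group_slowpath(struct request_queue *q)
{
	struct my_grp *g;

	spin_unlock_irq(q->queue_lock);		/* allocation may sleep */
	g = kzalloc(sizeof(*g), GFP_KERNEL);
	spin_lock_irq(q->queue_lock);

	/*
	 * The queue may have entered bypass (or begun dying) while the
	 * lock was dropped; re-test before publishing the new group.
	 */
	if (!g || unlikely(blk_queue_bypass(q))) {
		kfree(g);			/* kfree(NULL) is a no-op */
		return NULL;
	}
	return g;
}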