
Commit 807592a4 authored by Bart Van Assche, committed by Jens Axboe

block: Let blk_drain_queue() caller obtain the queue lock



Let the caller of blk_drain_queue() obtain the queue lock to improve
readability of the patch called "Avoid that request_fn is invoked on
a dead queue".

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: James Bottomley <JBottomley@Parallels.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Chanho Min <chanho.min@lge.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3f3299d5
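
The locking convention the patch introduces (the caller obtains the queue lock; the double-underscore helper runs with the lock held and drops and re-acquires it only around its sleep) can be sketched as a small userspace C program. This is an illustrative analogue, not kernel code: a pthread mutex stands in for the queue spinlock, and all names here are hypothetical.

/*
 * Userspace sketch of the pattern in this commit: the caller takes the
 * lock, and the __-prefixed helper expects it held on entry and exit,
 * releasing it only while sleeping. In the kernel version this contract
 * is expressed with __releases()/__acquires() and lockdep_assert_held().
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending = 3;		/* stand-in for outstanding requests */

/* Must be called with queue_lock held; returns with it held. */
static void __drain_queue(void)
{
	while (pending > 0) {
		pending--;	/* stand-in for one round of drain work */

		/* Sleep without holding the lock, as __blk_drain_queue() does. */
		pthread_mutex_unlock(&queue_lock);
		nanosleep(&(struct timespec){ .tv_nsec = 10 * 1000 * 1000 }, NULL);
		pthread_mutex_lock(&queue_lock);
	}
}

int main(void)
{
	/* The caller obtains the lock, as blk_cleanup_queue() does after this patch. */
	pthread_mutex_lock(&queue_lock);
	__drain_queue();
	pthread_mutex_unlock(&queue_lock);
	printf("drained\n");
	return 0;
}

The likely benefit, as the message suggests, is that the follow-up patch ("Avoid that request_fn is invoked on a dead queue") can check queue state and call the drain helper inside a single critical section.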
block/blk-core.c +18 −12
@@ -349,7 +349,7 @@ void blk_put_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_put_queue);
 
 /**
- * blk_drain_queue - drain requests from request_queue
+ * __blk_drain_queue - drain requests from request_queue
  * @q: queue to drain
  * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
  *
@@ -357,15 +357,17 @@ EXPORT_SYMBOL(blk_put_queue);
  * If not, only ELVPRIV requests are drained.  The caller is responsible
  * for ensuring that no new requests which need to be drained are queued.
  */
-void blk_drain_queue(struct request_queue *q, bool drain_all)
+static void __blk_drain_queue(struct request_queue *q, bool drain_all)
+	__releases(q->queue_lock)
+	__acquires(q->queue_lock)
 {
 	int i;
 
+	lockdep_assert_held(q->queue_lock);
+
 	while (true) {
 		bool drain = false;
 
-		spin_lock_irq(q->queue_lock);
-
 		/*
 		 * The caller might be trying to drain @q before its
 		 * elevator is initialized.
@@ -401,11 +403,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 			}
 		}
 
-		spin_unlock_irq(q->queue_lock);
-
 		if (!drain)
 			break;
+
+		spin_unlock_irq(q->queue_lock);
+
 		msleep(10);
+
+		spin_lock_irq(q->queue_lock);
 	}
 
 	/*
@@ -416,13 +421,9 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 	if (q->request_fn) {
 		struct request_list *rl;
 
-		spin_lock_irq(q->queue_lock);
-
 		blk_queue_for_each_rl(rl, q)
 			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
 				wake_up_all(&rl->wait[i]);
-
-		spin_unlock_irq(q->queue_lock);
 	}
 }
 
@@ -446,7 +447,10 @@ void blk_queue_bypass_start(struct request_queue *q)
 	spin_unlock_irq(q->queue_lock);
 
 	if (drain) {
-		blk_drain_queue(q, false);
+		spin_lock_irq(q->queue_lock);
+		__blk_drain_queue(q, false);
+		spin_unlock_irq(q->queue_lock);
+
 		/* ensure blk_queue_bypass() is %true inside RCU read lock */
 		synchronize_rcu();
 	}
@@ -504,7 +508,9 @@ void blk_cleanup_queue(struct request_queue *q)
 	mutex_unlock(&q->sysfs_lock);
 
 	/* drain all requests queued before DYING marking */
-	blk_drain_queue(q, true);
+	spin_lock_irq(lock);
+	__blk_drain_queue(q, true);
+	spin_unlock_irq(lock);
 
 	/* @q won't process any more request, flush async actions */
 	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);