
Commit c246e80d authored by Bart Van Assche, committed by Jens Axboe

block: Avoid that request_fn is invoked on a dead queue



A block driver may start cleaning up resources needed by its
request_fn as soon as blk_cleanup_queue() has finished, so request_fn
must not be invoked after draining has finished. This is important
when blk_run_queue() is invoked without any requests in progress.
For example, if blk_drain_queue() and scsi_run_queue() run in
parallel, blk_drain_queue() may have finished all requests after
scsi_run_queue() has taken a SCSI device off the starved list but
before that function has had a chance to run the queue.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Cc: James Bottomley <JBottomley@Parallels.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Chanho Min <chanho.min@lge.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 807592a4
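
To make the fix concrete, here is a minimal userspace model of the pattern this patch introduces (a sketch under illustrative names such as run_queue_uncond() and cleanup_queue(); it is not kernel code). The dead flag is tested under the queue lock before request_fn is called, so a run that races past cleanup becomes a no-op instead of touching freed state:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct queue {
	pthread_mutex_t lock;
	bool dead;				/* models QUEUE_FLAG_DEAD */
	void (*request_fn)(struct queue *);
	char *resources;			/* state that request_fn depends on */
};

/* Models __blk_run_queue_uncond(); the caller holds q->lock. */
static void run_queue_uncond(struct queue *q)
{
	if (q->dead)
		return;				/* torn down: never call request_fn */
	q->request_fn(q);
}

/* Models the tail of blk_cleanup_queue(). */
static void cleanup_queue(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	/* ... drain outstanding requests here ... */
	q->dead = true;				/* request_fn must not run after this */
	pthread_mutex_unlock(&q->lock);
	free(q->resources);			/* safe: no further request_fn calls */
	q->resources = NULL;
}

static void handle_requests(struct queue *q)
{
	printf("request_fn using %s\n", q->resources);
}

int main(void)
{
	struct queue q = {
		.request_fn = handle_requests,
		.resources = strdup("driver state"),
	};

	pthread_mutex_init(&q.lock, NULL);

	pthread_mutex_lock(&q.lock);
	run_queue_uncond(&q);			/* runs: queue still alive */
	pthread_mutex_unlock(&q.lock);

	cleanup_queue(&q);

	pthread_mutex_lock(&q.lock);
	run_queue_uncond(&q);			/* no-op: dead flag already set */
	pthread_mutex_unlock(&q.lock);
	return 0;
}

Without the dead check, the second run_queue_uncond() call would invoke request_fn on freed memory, which is exactly the window the blk_drain_queue()/scsi_run_queue() race described above can open.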
block/blk-core.c  +27 −4
@@ -292,6 +292,25 @@ void blk_sync_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
+/**
+ * __blk_run_queue_uncond - run a queue whether or not it has been stopped
+ * @q:	The queue to run
+ *
+ * Description:
+ *    Invoke request handling on a queue if there are any pending requests.
+ *    May be used to restart request handling after a request has completed.
+ *    This variant runs the queue whether or not the queue has been
+ *    stopped. Must be called with the queue lock held and interrupts
+ *    disabled. See also @blk_run_queue.
+ */
+inline void __blk_run_queue_uncond(struct request_queue *q)
+{
+	if (unlikely(blk_queue_dead(q)))
+		return;
+
+	q->request_fn(q);
+}
+
 /**
  * __blk_run_queue - run a single device queue
  * @q:	The queue to run
@@ -305,7 +324,7 @@ void __blk_run_queue(struct request_queue *q)
 	if (unlikely(blk_queue_stopped(q)))
 		return;
 
-	q->request_fn(q);
+	__blk_run_queue_uncond(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -477,8 +496,8 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
- * Mark @q DYING, drain all pending requests, destroy and put it.  All
- * future requests will be failed immediately with -ENODEV.
+ * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
+ * put it.  All future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
@@ -507,9 +526,13 @@ void blk_cleanup_queue(struct request_queue *q)
 	spin_unlock_irq(lock);
 	mutex_unlock(&q->sysfs_lock);
 
-	/* drain all requests queued before DYING marking */
+	/*
+	 * Drain all requests queued before DYING marking. Set the DEAD flag
+	 * to prevent q->request_fn() from running after draining finishes.
+	 */
 	spin_lock_irq(lock);
 	__blk_drain_queue(q, true);
+	queue_flag_set(QUEUE_FLAG_DEAD, q);
 	spin_unlock_irq(lock);
 
 	/* @q won't process any more request, flush async actions */
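
The updated blk_cleanup_queue() contract above is what allows a driver to free request_fn's resources immediately after the call returns. A teardown routine of roughly the following shape is what the commit message has in mind (a sketch; struct mydrv, mydrv_remove() and dev->cmd_pool are hypothetical names, not part of this patch):

static void mydrv_remove(struct mydrv *dev)
{
	blk_cleanup_queue(dev->queue);	/* drain, mark DEAD, destroy and put */
	kfree(dev->cmd_pool);		/* request_fn can no longer touch this */
}

Once QUEUE_FLAG_DEAD has been set under the queue lock, a blk_run_queue() call racing with the tail of blk_cleanup_queue() reaches __blk_run_queue_uncond(), sees blk_queue_dead(q), and returns without calling into the driver.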
block/blk-exec.c  +1 −1
@@ -72,7 +72,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	__blk_run_queue(q);
 	/* the queue is stopped so it won't be run */
 	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
-		q->request_fn(q);
+		__blk_run_queue_uncond(q);
 	spin_unlock_irq(q->queue_lock);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
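
The unconditional variant matters here because blk_execute_rq_nowait() can be called while the queue is stopped for power management: as the comment in the hunk notes, a stopped queue won't be run, so the earlier __blk_run_queue() call bails out on QUEUE_FLAG_STOPPED and a REQ_TYPE_PM_RESUME request would otherwise never be dispatched. __blk_run_queue_uncond() skips the stopped check while still honoring the new dead check, so the resume request runs unless the queue has already been torn down.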
block/blk.h  +2 −0
@@ -145,6 +145,8 @@ int blk_try_merge(struct request *rq, struct bio *bio);
 
 void blk_queue_congestion_threshold(struct request_queue *q);
 
+void __blk_run_queue_uncond(struct request_queue *q);
+
 int blk_dev_init(void);
 
 
include/linux/blkdev.h  +2 −0
@@ -452,6 +452,7 @@ struct request_queue {
 #define QUEUE_FLAG_ADD_RANDOM  16	/* Contributes to random pool */
 #define QUEUE_FLAG_SECDISCARD  17	/* supports SECDISCARD */
 #define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
+#define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -522,6 +523,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
+#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 #define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\