Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1654e741 authored by Tejun Heo, committed by Jens Axboe
Browse files

block: add @force_kblockd to __blk_run_queue()



__blk_run_queue() automatically either calls q->request_fn() directly
or schedules kblockd depending on whether the function is recursed.
blk-flush implementation needs to be able to explicitly choose
kblockd.  Add @force_kblockd.

All the current users are converted to specify %false for the
parameter and this patch doesn't introduce any behavior change.

stable: This is prerequisite for fixing ide oops caused by the new
        blk-flush implementation.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jan Beulich <JBeulich@novell.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: stable@kernel.org
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 291d24f6
Loading
Loading
Loading
Loading
+6 −5
Original line number Diff line number Diff line
@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
	__blk_run_queue(q, false);
}
EXPORT_SYMBOL(blk_start_queue);

@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 * @force_kblockd: Don't run @q->request_fn directly.  Use kblockd.
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 *
 */
void __blk_run_queue(struct request_queue *q)
void __blk_run_queue(struct request_queue *q, bool force_kblockd)
{
	blk_remove_plug(q);

@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
	 * Only recurse once to avoid overrunning the stack, let the unplug
	 * handling reinvoke the handler shortly if we already got there.
	 */
	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
		q->request_fn(q);
		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	} else {
@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	__blk_run_queue(q, false);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,

	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, where, 0);
	__blk_run_queue(q);
	__blk_run_queue(q, false);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);
+1 −1
Original line number Diff line number Diff line
@@ -69,7 +69,7 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
	 * queue.  Kick the queue in those cases.
	 */
	if (was_empty && next_rq)
		__blk_run_queue(q);
		__blk_run_queue(q, false);
}

static void pre_flush_end_io(struct request *rq, int error)
+3 −3
Original line number Diff line number Diff line
@@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			    cfqd->busy_queues > 1) {
				cfq_del_timer(cfqd, cfqq);
				cfq_clear_cfqq_wait_request(cfqq);
				__blk_run_queue(cfqd->queue);
				__blk_run_queue(cfqd->queue, false);
			} else {
				cfq_blkiocg_update_idle_time_stats(
						&cfqq->cfqg->blkg);
@@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 * this new queue is RT and the current one is BE
		 */
		cfq_preempt_queue(cfqd, cfqq);
		__blk_run_queue(cfqd->queue);
		__blk_run_queue(cfqd->queue, false);
	}
}

@@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work)
	struct request_queue *q = cfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(cfqd->queue);
	__blk_run_queue(cfqd->queue, false);
	spin_unlock_irq(q->queue_lock);
}

+2 −2
Original line number Diff line number Diff line
@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
	 */
	elv_drain_elevator(q);
	while (q->rq.elvpriv) {
		__blk_run_queue(q);
		__blk_run_queue(q, false);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		__blk_run_queue(q, false);
		break;

	case ELEVATOR_INSERT_SORT:
+1 −1
Original line number Diff line number Diff line
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
					&sdev->request_queue->queue_flags);
		if (flagset)
			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
		__blk_run_queue(sdev->request_queue);
		__blk_run_queue(sdev->request_queue, false);
		if (flagset)
			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
		spin_unlock(sdev->request_queue->queue_lock);
Loading