
Commit 24ecfbe2 authored by Christoph Hellwig, committed by Jens Axboe

block: add blk_run_queue_async

Instead of overloading __blk_run_queue to force an offload to kblockd,
add a new blk_run_queue_async helper to do it explicitly.  I've kept
the blk_queue_stopped check for now, but I suspect it's not needed,
as the check we do when the workqueue item runs should be enough.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 4521cc4e
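
For context, a minimal caller-side sketch of the API split this patch makes. This is illustration only, not part of the commit: example_kick_queue() and its in_completion flag are hypothetical, and blk_run_queue_async() is block-layer internal (declared in block/blk.h by this patch); the calls themselves are the ones the diff below touches.

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical call site showing how the kblockd offload becomes explicit. */
static void example_kick_queue(struct request_queue *q, bool in_completion)
{
	if (in_completion) {
		/*
		 * Completion context: calling ->request_fn() directly could
		 * recurse into the driver, so punt to kblockd explicitly
		 * instead of passing force_kblockd=true as before.  Note
		 * that __blk_run_queue() re-checks blk_queue_stopped() when
		 * the work item eventually runs, which is why the early
		 * check in blk_run_queue_async() is suspected redundant.
		 */
		blk_run_queue_async(q);
	} else {
		/* Process context: run the queue directly under the lock. */
		spin_lock_irq(q->queue_lock);
		__blk_run_queue(q);
		spin_unlock_irq(q->queue_lock);
	}
}

The design point is that the decision to offload now lives at the call site rather than in a boolean parameter, so the recursion and locking constraints are visible where they matter.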
block/blk-core.c  +24 −12
@@ -204,7 +204,7 @@ static void blk_delay_work(struct work_struct *work)
 
 	q = container_of(work, struct request_queue, delay_work.work);
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irq(q->queue_lock);
 }
 
@@ -239,7 +239,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -296,11 +296,9 @@ EXPORT_SYMBOL(blk_sync_queue);
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled. If force_kblockd is true, then it is
- *    safe to call this without holding the queue lock.
- *
+ *    held and interrupts disabled.
  */
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
 {
 	if (unlikely(blk_queue_stopped(q)))
 		return;
@@ -309,7 +307,7 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else
@@ -317,6 +315,20 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
+/**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q:	The queue to run
+ *
+ * Description:
+ *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
+ *    of us.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+	if (likely(!blk_queue_stopped(q)))
+		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+
 /**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
@@ -330,7 +342,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -979,7 +991,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 		blk_queue_end_tag(q, rq);
 
 	add_acct_request(q, rq, where);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1323,7 +1335,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	} else {
 		spin_lock_irq(q->queue_lock);
 		add_acct_request(q, req, where);
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
@@ -2684,9 +2696,9 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 	 */
 	if (from_schedule) {
 		spin_unlock(q->queue_lock);
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 	} else {
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		spin_unlock(q->queue_lock);
 	}

block/blk-exec.c  +1 −1
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
 	__elv_add_request(q, rq, where);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	/* the queue is stopped so it won't be plugged+unplugged */
 	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
 		q->request_fn(q);
block/blk-flush.c  +2 −2
@@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 	 * request_fn may confuse the driver.  Always use kblockd.
 	 */
 	if (queued)
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 }
 
 /**
@@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error)
 	 * the comment in flush_end_io().
 	 */
 	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 }
 
 /**
block/blk.h  +1 −0
@@ -22,6 +22,7 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
+void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
block/cfq-iosched.c  +3 −3
@@ -3368,7 +3368,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			    cfqd->busy_queues > 1) {
 				cfq_del_timer(cfqd, cfqq);
 				cfq_clear_cfqq_wait_request(cfqq);
-				__blk_run_queue(cfqd->queue, false);
+				__blk_run_queue(cfqd->queue);
 			} else {
 				cfq_blkiocg_update_idle_time_stats(
 						&cfqq->cfqg->blkg);
@@ -3383,7 +3383,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		__blk_run_queue(cfqd->queue, false);
+		__blk_run_queue(cfqd->queue);
 	}
 }
 
@@ -3743,7 +3743,7 @@ static void cfq_kick_queue(struct work_struct *work)
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(cfqd->queue, false);
+	__blk_run_queue(cfqd->queue);
 	spin_unlock_irq(q->queue_lock);
 }
