
Commit 75ad23bc authored by Nick Piggin, committed by Jens Axboe

block: make queue flags non-atomic

We can save some atomic ops in the IO path if we clearly define
the rules for how to modify the queue flags.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 68154e90
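
The queue_flag_set()/queue_flag_clear() helpers used throughout this diff are introduced in include/linux/blkdev.h; that hunk is not part of this excerpt. As a minimal sketch of the idea (illustrative, not a verbatim copy of the blkdev.h hunk), the helpers reduce to non-atomic __set_bit()/__clear_bit(), with the rule that callers of the plain variants hold q->queue_lock, while the _unlocked variants rely on some other serialization (blk_cleanup_queue() below relies on q->sysfs_lock):

/*
 * Illustrative sketch of the new helpers, assuming the names as
 * they appear in the diff below. Rule: plain variants require
 * q->queue_lock held; _unlocked variants require external
 * serialization.
 */
static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	/* caller holds q->queue_lock */
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	/* caller holds q->queue_lock */
	__clear_bit(flag, &q->queue_flags);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	/* caller provides serialization by other means */
	__set_bit(flag, &q->queue_flags);
}

Because the bit operations are no longer atomic read-modify-write instructions, an unserialized writer could lose a concurrent update to a neighboring flag in the same word. That is why the test_and_set_bit()/test_and_clear_bit() call sites below are split into a test_bit() check plus a non-atomic set or clear under the lock.
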
block/blk-core.c +26 −13
@@ -198,7 +198,8 @@ void blk_plug_device(struct request_queue *q)
	if (blk_queue_stopped(q))
		return;

-	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+		__set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
	}
@@ -213,9 +214,10 @@ int blk_remove_plug(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

-	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
		return 0;

+	queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
	del_timer(&q->unplug_timer);
	return 1;
}
@@ -311,15 +313,16 @@ void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

-	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_STOPPED, q);

	/*
	 * one level of recursion is ok and is much faster than kicking
	 * the unplug handling
	 */
-	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+	if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		queue_flag_set(QUEUE_FLAG_REENTER, q);
		q->request_fn(q);
-		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	} else {
		blk_plug_device(q);
		kblockd_schedule_work(&q->unplug_work);
@@ -344,7 +347,7 @@ EXPORT_SYMBOL(blk_start_queue);
void blk_stop_queue(struct request_queue *q)
{
	blk_remove_plug(q);
-	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);

@@ -373,11 +376,8 @@ EXPORT_SYMBOL(blk_sync_queue);
 * blk_run_queue - run a single device queue
 * @q:	The queue to run
 */
-void blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q)
{
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
	blk_remove_plug(q);

	/*
@@ -385,15 +385,28 @@ void blk_run_queue(struct request_queue *q)
	 * handling reinvoke the handler shortly if we already got there.
	 */
	if (!elv_queue_empty(q)) {
-		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+			queue_flag_set(QUEUE_FLAG_REENTER, q);
			q->request_fn(q);
-			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+			queue_flag_clear(QUEUE_FLAG_REENTER, q);
		} else {
			blk_plug_device(q);
			kblockd_schedule_work(&q->unplug_work);
		}
	}
}
+EXPORT_SYMBOL(__blk_run_queue);
+
+/**
+ * blk_run_queue - run a single device queue
+ * @q: The queue to run
+ */
+void blk_run_queue(struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
@@ -406,7 +419,7 @@ void blk_put_queue(struct request_queue *q)
void blk_cleanup_queue(struct request_queue *q)
{
	mutex_lock(&q->sysfs_lock);
-	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
	mutex_unlock(&q->sysfs_lock);

	if (q->elevator)
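
The blk-core.c changes above split blk_run_queue() into a bare __blk_run_queue(), which expects q->queue_lock to already be held, and a locking wrapper that keeps the old behavior. A minimal usage sketch; the example_* callers are hypothetical, not from this commit:

/* Hypothetical caller already under q->queue_lock, e.g. in a
 * request completion path: use the lock-free variant directly. */
static void example_restart_locked(struct request_queue *q)
{
	__blk_run_queue(q);
}

/* Hypothetical caller in unlocked context: blk_run_queue()
 * takes and releases q->queue_lock itself. */
static void example_restart_unlocked(struct request_queue *q)
{
	blk_run_queue(q);
}

The split also means the non-atomic queue_flag_* helpers can safely be used inside __blk_run_queue(), since the queue lock is guaranteed to be held there.
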
block/blk-merge.c +3 −3
@@ -55,7 +55,7 @@ void blk_recalc_rq_segments(struct request *rq)
	if (!rq->bio)
		return;

-	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	hw_seg_size = seg_size = 0;
	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
	rq_for_each_segment(bv, rq, iter) {
@@ -128,7 +128,7 @@ EXPORT_SYMBOL(blk_recount_segments);
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
-	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
		return 0;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
@@ -175,7 +175,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
	int nsegs, cluster;

	nsegs = 0;
-	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);

	/*
	 * for each bio in rq
block/blk-settings.c +1 −1
@@ -287,7 +287,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

block/blk-tag.c +4 −4
@@ -70,7 +70,7 @@ void __blk_queue_free_tags(struct request_queue *q)
	__blk_free_tags(bqt);

	q->queue_tags = NULL;
-	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
+	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
}

/**
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(blk_free_tags);
 **/
void blk_queue_free_tags(struct request_queue *q)
{
-	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

@@ -188,7 +188,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
-		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_QUEUED, q);
		return 0;
	} else
		atomic_inc(&tags->refcnt);
@@ -197,7 +197,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
	 * assign it, all done
	 */
	q->queue_tags = tags;
-	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+	queue_flag_set(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
fail:
block/elevator.c +10 −3
@@ -1070,7 +1070,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
	 */
	spin_lock_irq(q->queue_lock);

-	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);

	elv_drain_elevator(q);

@@ -1104,7 +1104,10 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
-	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+	spin_lock_irq(q->queue_lock);
+	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+	spin_unlock_irq(q->queue_lock);
+
	return 1;

fail_register:
@@ -1115,7 +1118,11 @@ fail_register:
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);
-	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+
+	spin_lock_irq(q->queue_lock);
+	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+	spin_unlock_irq(q->queue_lock);
+
	return 0;
}
