Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 398205b8 authored by Paolo Bonzini, committed by Jens Axboe
Browse files

blk_mq: call preempt_disable/enable in blk_mq_run_hw_queue, and only if needed



preempt_disable/enable surrounds every call to blk_mq_run_hw_queue,
except the one in blk-flush.c.  In fact that one is always asynchronous,
and it does not need smp_processor_id().

We can do the same for all other calls, avoiding preempt_disable when
async is true.  This avoids peppering blk-mq.c with preemption-disabled
regions.

Cc: Jens Axboe <axboe@kernel.dk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Reported-by: Clark Williams <williams@redhat.com>
Tested-by: Clark Williams <williams@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 9c6ac78e
Loading
Loading
Loading
Loading
+12 −9
Original line number Diff line number Diff line
@@ -801,9 +801,18 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
 		return;
 
-	if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
-		__blk_mq_run_hw_queue(hctx);
-	else if (hctx->queue->nr_hw_queues == 1)
+	if (!async) {
+		preempt_disable();
+		if (cpumask_test_cpu(smp_processor_id(), hctx->cpumask)) {
+			__blk_mq_run_hw_queue(hctx);
+			preempt_enable();
+			return;
+		}
+
+		preempt_enable();
+	}
+
+	if (hctx->queue->nr_hw_queues == 1)
 		kblockd_schedule_delayed_work(&hctx->run_work, 0);
 	else {
 		unsigned int cpu;
@@ -824,9 +833,7 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			continue;
 
-		preempt_disable();
 		blk_mq_run_hw_queue(hctx, async);
-		preempt_enable();
 	}
 }
 EXPORT_SYMBOL(blk_mq_run_queues);
@@ -853,9 +860,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 
-	preempt_disable();
 	blk_mq_run_hw_queue(hctx, false);
-	preempt_enable();
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);

@@ -880,9 +885,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 			continue;
 
 		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
-		preempt_disable();
 		blk_mq_run_hw_queue(hctx, async);
-		preempt_enable();
 	}
 }
 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);