Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 344e9ffc authored by Jens Axboe
Browse files

block: add queue_is_mq() helper



Various spots check for q->mq_ops being non-NULL, so provide
a helper to do this check instead.

Where the ->mq_ops != NULL check is redundant, remove it.

Since mq == rq-based now that legacy is gone, get rid of the
queue_is_rq_based() and just use queue_is_mq() everywhere.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent dabcefab
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -1324,7 +1324,7 @@ int blkcg_activate_policy(struct request_queue *q,
	if (blkcg_policy_enabled(q, pol))
		return 0;

	if (q->mq_ops)
	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);
pd_prealloc:
	if (!pd_prealloc) {
@@ -1363,7 +1363,7 @@ int blkcg_activate_policy(struct request_queue *q,

	spin_unlock_irq(&q->queue_lock);
out_bypass_end:
	if (q->mq_ops)
	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
@@ -1387,7 +1387,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
	if (!blkcg_policy_enabled(q, pol))
		return;

	if (q->mq_ops)
	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
@@ -1405,7 +1405,7 @@ void blkcg_deactivate_policy(struct request_queue *q,

	spin_unlock_irq(&q->queue_lock);

	if (q->mq_ops)
	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
+6 −6
Original line number Diff line number Diff line
@@ -232,7 +232,7 @@ void blk_sync_queue(struct request_queue *q)
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);

	if (q->mq_ops) {
	if (queue_is_mq(q)) {
		struct blk_mq_hw_ctx *hctx;
		int i;

@@ -281,7 +281,7 @@ void blk_set_queue_dying(struct request_queue *q)
	 */
	blk_freeze_queue_start(q);

	if (q->mq_ops)
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);

	/* Make blk_queue_enter() reexamine the DYING flag. */
@@ -356,7 +356,7 @@ void blk_cleanup_queue(struct request_queue *q)
	 * blk_freeze_queue() should be enough for cases of passthrough
	 * request.
	 */
	if (q->mq_ops && blk_queue_init_done(q))
	if (queue_is_mq(q) && blk_queue_init_done(q))
		blk_mq_quiesce_queue(q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
@@ -374,7 +374,7 @@ void blk_cleanup_queue(struct request_queue *q)

	blk_exit_queue(q);

	if (q->mq_ops)
	if (queue_is_mq(q))
		blk_mq_free_queue(q);

	percpu_ref_exit(&q->q_usage_counter);
@@ -982,7 +982,7 @@ generic_make_request_checks(struct bio *bio)
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue is not a request based queue.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
		goto not_supported;

	if (should_fail_bio(bio))
@@ -1657,7 +1657,7 @@ EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
 */
int blk_lld_busy(struct request_queue *q)
{
	if (q->mq_ops && q->mq_ops->busy)
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
+1 −2
Original line number Diff line number Diff line
@@ -273,8 +273,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
	 * assigned to empty flushes, and we deadlock if we are expecting
	 * other requests to make progress. Don't defer for that case.
	 */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    !(q->mq_ops && q->elevator) &&
	if (!list_empty(&fq->flush_data_in_flight) && q->elevator &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;
+1 −1
Original line number Diff line number Diff line
@@ -150,7 +150,7 @@ void blk_freeze_queue_start(struct request_queue *q)
	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		if (q->mq_ops)
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	}
}
+7 −7
Original line number Diff line number Diff line
@@ -68,7 +68,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
	unsigned long nr;
	int ret, err;

	if (!q->mq_ops)
	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
@@ -835,12 +835,12 @@ static void __blk_release_queue(struct work_struct *work)

	blk_queue_free_zone_bitmaps(q);

	if (q->mq_ops)
	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);

	if (q->mq_ops)
	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);
@@ -914,7 +914,7 @@ int blk_register_queue(struct gendisk *disk)
		goto unlock;
	}

	if (q->mq_ops) {
	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}
@@ -925,7 +925,7 @@ int blk_register_queue(struct gendisk *disk)

	blk_throtl_register_queue(q);

	if ((q->mq_ops && q->elevator)) {
	if (q->elevator) {
		ret = elv_register_queue(q);
		if (ret) {
			mutex_unlock(&q->sysfs_lock);
@@ -974,7 +974,7 @@ void blk_unregister_queue(struct gendisk *disk)
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (q->mq_ops)
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);
	mutex_unlock(&q->sysfs_lock);

@@ -983,7 +983,7 @@ void blk_unregister_queue(struct gendisk *disk)
	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->mq_ops && q->elevator)
	if (q->elevator)
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);

Loading