
Commit f9cd4bfe authored by Jens Axboe

block: get rid of MQ scheduler ops union



This is a remnant of when we had ops for both SQ and MQ
schedulers. Now it's just MQ, so get rid of the union.

Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a1ce35fa
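
For context: the union being removed is the ops member of struct elevator_type in include/linux/elevator.h. That hunk is not captured on this page, but given the commit message and the call-site updates below, the header-side change is presumably shaped like this (a sketch, not the verbatim hunk):

 struct elevator_type {
 	/* ... other members unchanged ... */
-	union {
-		struct elevator_mq_ops mq;
-	} ops;
+	struct elevator_mq_ops ops;
 	/* ... other members unchanged ... */
 };

With the single-queue (SQ) ops gone as of the parent commit, the union had only one member left, so flattening it lets every caller drop the ".mq" step.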
block/bfq-iosched.c +1 −1
@@ -5724,7 +5724,7 @@ static struct elv_fs_entry bfq_attrs[] = {
};

static struct elevator_type iosched_bfq_mq = {
-	.ops.mq = {
+	.ops = {
		.limit_depth		= bfq_limit_depth,
		.prepare_request	= bfq_prepare_request,
		.requeue_request        = bfq_finish_requeue_request,
block/blk-ioc.c +4 −4
@@ -48,8 +48,8 @@ static void ioc_exit_icq(struct io_cq *icq)
	if (icq->flags & ICQ_EXITED)
		return;

-	if (et->ops.mq.exit_icq)
-		et->ops.mq.exit_icq(icq);
+	if (et->ops.exit_icq)
+		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}
@@ -396,8 +396,8 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
-		if (et->ops.mq.init_icq)
-			et->ops.mq.init_icq(icq);
+		if (et->ops.init_icq)
+			et->ops.init_icq(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
block/blk-mq-sched.c +16 −17
@@ -85,14 +85,13 @@ static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
	do {
		struct request *rq;

-		if (e->type->ops.mq.has_work &&
-				!e->type->ops.mq.has_work(hctx))
+		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

-		rq = e->type->ops.mq.dispatch_request(hctx);
+		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
@@ -163,7 +162,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
-	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
+	const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
	LIST_HEAD(rq_list);

	/* RCU or SRCU read lock is needed before checking quiesced flag */
@@ -314,9 +313,9 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	bool ret = false;

-	if (e && e->type->ops.mq.bio_merge) {
+	if (e && e->type->ops.bio_merge) {
		blk_mq_put_ctx(ctx);
-		return e->type->ops.mq.bio_merge(hctx, bio);
+		return e->type->ops.bio_merge(hctx, bio);
	}

	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
@@ -380,11 +379,11 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
	if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
		goto run;

-	if (e && e->type->ops.mq.insert_requests) {
+	if (e && e->type->ops.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
-		e->type->ops.mq.insert_requests(hctx, &list, at_head);
+		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
@@ -403,8 +402,8 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	struct elevator_queue *e = hctx->queue->elevator;

-	if (e && e->type->ops.mq.insert_requests)
-		e->type->ops.mq.insert_requests(hctx, list, false);
+	if (e && e->type->ops.insert_requests)
+		e->type->ops.insert_requests(hctx, list, false);
	else {
		/*
		 * try to issue requests directly if the hw queue isn't
@@ -489,15 +488,15 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
			goto err;
	}

-	ret = e->ops.mq.init_sched(q, e);
+	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
-		if (e->ops.mq.init_hctx) {
-			ret = e->ops.mq.init_hctx(hctx, i);
+		if (e->ops.init_hctx) {
+			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_exit_sched(q, eq);
@@ -523,14 +522,14 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
-		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
-			e->type->ops.mq.exit_hctx(hctx, i);
+		if (e->type->ops.exit_hctx && hctx->sched_data) {
+			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
-	if (e->type->ops.mq.exit_sched)
-		e->type->ops.mq.exit_sched(e);
+	if (e->type->ops.exit_sched)
+		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}
block/blk-mq-sched.h +10 −10
@@ -43,8 +43,8 @@ blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
{
	struct elevator_queue *e = q->elevator;

-	if (e && e->type->ops.mq.allow_merge)
-		return e->type->ops.mq.allow_merge(q, rq, bio);
+	if (e && e->type->ops.allow_merge)
+		return e->type->ops.allow_merge(q, rq, bio);

	return true;
}
@@ -53,8 +53,8 @@ static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
	struct elevator_queue *e = rq->q->elevator;

-	if (e && e->type->ops.mq.completed_request)
-		e->type->ops.mq.completed_request(rq, now);
+	if (e && e->type->ops.completed_request)
+		e->type->ops.completed_request(rq, now);
}

static inline void blk_mq_sched_started_request(struct request *rq)
@@ -62,8 +62,8 @@ static inline void blk_mq_sched_started_request(struct request *rq)
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

-	if (e && e->type->ops.mq.started_request)
-		e->type->ops.mq.started_request(rq);
+	if (e && e->type->ops.started_request)
+		e->type->ops.started_request(rq);
}

static inline void blk_mq_sched_requeue_request(struct request *rq)
@@ -71,16 +71,16 @@ static inline void blk_mq_sched_requeue_request(struct request *rq)
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

-	if (e && e->type->ops.mq.requeue_request)
-		e->type->ops.mq.requeue_request(rq);
+	if (e && e->type->ops.requeue_request)
+		e->type->ops.requeue_request(rq);
}

static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

-	if (e && e->type->ops.mq.has_work)
-		return e->type->ops.mq.has_work(hctx);
+	if (e && e->type->ops.has_work)
+		return e->type->ops.has_work(hctx);

	return false;
}
block/blk-mq.c +6 −6
@@ -363,9 +363,9 @@ static struct request *blk_mq_get_request(struct request_queue *q,
		 * dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
-		if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
+		if (!op_is_flush(op) && e->type->ops.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
-			e->type->ops.mq.limit_depth(op, data);
+			e->type->ops.limit_depth(op, data);
	} else {
		blk_mq_tag_busy(data->hctx);
	}
@@ -383,11 +383,11 @@ static struct request *blk_mq_get_request(struct request_queue *q,
	rq = blk_mq_rq_ctx_init(data, tag, op);
	if (!op_is_flush(op)) {
		rq->elv.icq = NULL;
-		if (e && e->type->ops.mq.prepare_request) {
+		if (e && e->type->ops.prepare_request) {
			if (e->type->icq_cache && rq_ioc(bio))
				blk_mq_sched_assign_ioc(rq, bio);

-			e->type->ops.mq.prepare_request(rq, bio);
+			e->type->ops.prepare_request(rq, bio);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}
@@ -491,8 +491,8 @@ void blk_mq_free_request(struct request *rq)
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	if (rq->rq_flags & RQF_ELVPRIV) {
-		if (e && e->type->ops.mq.finish_request)
-			e->type->ops.mq.finish_request(rq);
+		if (e && e->type->ops.finish_request)
+			e->type->ops.finish_request(rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
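
Taken together, the call sites in this diff pin down the shape of the ops table that remains after the flattening. The sketch below reconstructs struct elevator_mq_ops purely from the hunks above; the member signatures are inferred from how each hook is invoked, not copied verbatim from include/linux/elevator.h, so treat it as an illustration rather than the authoritative definition.

/*
 * Reconstruction of the MQ scheduler ops table, inferred from the call
 * sites in this commit (e.g. ops.dispatch_request(hctx) returns a
 * struct request *, ops.insert_requests(hctx, &list, at_head) takes a
 * list and a bool, and so on). Signatures are assumptions.
 */
#include <linux/types.h>	/* bool, u64 */
#include <linux/list.h>		/* struct list_head */

struct request_queue;
struct request;
struct bio;
struct blk_mq_hw_ctx;
struct blk_mq_alloc_data;
struct elevator_type;
struct elevator_queue;
struct io_cq;

struct elevator_mq_ops {
	int (*init_sched)(struct request_queue *, struct elevator_type *);
	void (*exit_sched)(struct elevator_queue *);
	int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
	bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
	bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
	void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
	void (*prepare_request)(struct request *, struct bio *);
	void (*finish_request)(struct request *);
	void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
	struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
	bool (*has_work)(struct blk_mq_hw_ctx *);
	void (*completed_request)(struct request *, u64);
	void (*started_request)(struct request *);
	void (*requeue_request)(struct request *);
	void (*init_icq)(struct io_cq *);
	void (*exit_icq)(struct io_cq *);
};

The change itself is purely mechanical: every caller goes from e->type->ops.mq.X(...) to e->type->ops.X(...), with no change in behavior.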