Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a1ce35fa authored by Jens Axboe's avatar Jens Axboe
Browse files

block: remove dead elevator code



This removes a bunch of core and elevator related code. On the core
front, we remove anything related to queue running, draining,
initialization, plugging, and congestion. We also kill anything
related to request allocation, merging, retrieval, and completion.

Remove any checking for single queue IO schedulers, as they no
longer exist. This means we can also delete a bunch of code related
to request issue, adding, completion, etc - and all the SQ related
ops and helpers.

Also kill the load_default_modules(), as all that did was provide
for a way to load the default single queue elevator.

Tested-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f382fb0b
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -5745,7 +5745,6 @@ static struct elevator_type iosched_bfq_mq = {
		.exit_sched		= bfq_exit_queue,
	},

	.uses_mq =		true,
	.icq_size =		sizeof(struct bfq_io_cq),
	.icq_align =		__alignof__(struct bfq_io_cq),
	.elevator_attrs =	bfq_attrs,
+34 −1715

File changed.

Preview size limit exceeded, changes collapsed.

+1 −19
Original line number Diff line number Diff line
@@ -48,8 +48,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;

	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

@@ -60,23 +58,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
	 * don't check dying flag for MQ because the request won't
	 * be reused after dying flag is set
	 */
	if (q->mq_ops) {
	blk_mq_sched_insert_request(rq, at_head, true, false);
		return;
	}

	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_dying(q))) {
		rq->rq_flags |= RQF_QUIET;
		__blk_end_request_all(rq, BLK_STS_IOERR);
		spin_unlock_irq(q->queue_lock);
		return;
	}

	__elv_add_request(q, rq, where);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

+6 −27
Original line number Diff line number Diff line
@@ -48,10 +48,8 @@ static void ioc_exit_icq(struct io_cq *icq)
	if (icq->flags & ICQ_EXITED)
		return;

	if (et->uses_mq && et->ops.mq.exit_icq)
	if (et->ops.mq.exit_icq)
		et->ops.mq.exit_icq(icq);
	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
		et->ops.sq.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}
@@ -187,25 +185,13 @@ void put_io_context_active(struct io_context *ioc)
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		et = icq->q->elevator->type;
		if (et->uses_mq) {
		ioc_exit_icq(icq);
		} else {
			if (spin_trylock(icq->q->queue_lock)) {
				ioc_exit_icq(icq);
				spin_unlock(icq->q->queue_lock);
			} else {
				spin_unlock_irqrestore(&ioc->lock, flags);
				cpu_relax();
				goto retry;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

@@ -253,14 +239,9 @@ void ioc_clear_queue(struct request_queue *q)

	spin_lock_irq(q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);

	if (q->mq_ops) {
	spin_unlock_irq(q->queue_lock);

	__ioc_clear_queue(&icq_list);
	} else {
		__ioc_clear_queue(&icq_list);
		spin_unlock_irq(q->queue_lock);
	}
}

int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
@@ -415,10 +396,8 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->uses_mq && et->ops.mq.init_icq)
		if (et->ops.mq.init_icq)
			et->ops.mq.init_icq(icq);
		else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
			et->ops.sq.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
+0 −5
Original line number Diff line number Diff line
@@ -862,13 +862,8 @@ struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct elevator_queue *e = q->elevator;
	struct request *free;

	if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
		if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
			return 0;

	free = attempt_merge(q, rq, next);
	if (free) {
		__blk_put_request(q, free);
Loading