Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 131d08e1 authored by Christoph Hellwig, committed by Jens Axboe
Browse files

block: split the blk-mq case from elevator_init



There is almost no shared logic, which leads to a very confusing code
flow.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Tested-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent acddf3b3
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -2573,7 +2573,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
		int ret;

		ret = elevator_init(q);
		ret = elevator_init_mq(q);
		if (ret)
			return ERR_PTR(ret);
	}
+1 −0
Original line number Diff line number Diff line
@@ -232,6 +232,7 @@ static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq
}

int elevator_init(struct request_queue *);
int elevator_init_mq(struct request_queue *q);
void elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q);
+46 −31
Original line number Diff line number Diff line
@@ -199,6 +199,11 @@ static void elevator_release(struct kobject *kobj)
	kfree(e);
}

/*
 * Use the default elevator specified by config boot param for non-mq devices,
 * or by config option.  Don't try to load modules as we could be running off
 * async and request_module() isn't allowed from async.
 */
int elevator_init(struct request_queue *q)
{
	struct elevator_type *e = NULL;
@@ -212,45 +217,21 @@ int elevator_init(struct request_queue *q)
	if (unlikely(q->elevator))
		goto out_unlock;

	/*
	 * Use the default elevator specified by config boot param for
	 * non-mq devices, or by config option. Don't try to load modules
	 * as we could be running off async and request_module() isn't
	 * allowed from async.
	 */
	if (!q->mq_ops && *chosen_elevator) {
	if (*chosen_elevator) {
		e = elevator_get(q, chosen_elevator, false);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		/*
		 * For blk-mq devices, we default to using mq-deadline,
		 * if available, for single queue devices. If deadline
		 * isn't available OR we have multiple queues, default
		 * to "none".
		 */
		if (q->mq_ops) {
			if (q->nr_hw_queues == 1)
				e = elevator_get(q, "mq-deadline", false);
	if (!e)
				goto out_unlock;
		} else
		e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);

	if (!e) {
		printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			"Default I/O scheduler not found. Using noop.\n");
		e = elevator_get(q, "noop", false);
	}
	}

	if (e->uses_mq)
		err = blk_mq_init_sched(q, e);
	else
	err = e->ops.sq.elevator_init_fn(q, e);
	if (err)
		elevator_put(e);
@@ -992,6 +973,40 @@ static int elevator_switch_mq(struct request_queue *q,
	return ret;
}

/*
 * For blk-mq devices, we default to using mq-deadline, if available, for single
 * queue devices.  If deadline isn't available OR we have multiple queues,
 * default to "none".
 */
int elevator_init_mq(struct request_queue *q)
{
	struct elevator_type *e;
	int err = 0;

	/* Multi-queue devices default to "none": no scheduler to set up. */
	if (q->nr_hw_queues != 1)
		return 0;

	/*
	 * q->sysfs_lock must be held to provide mutual exclusion between
	 * elevator_switch() and here.
	 */
	mutex_lock(&q->sysfs_lock);
	if (unlikely(q->elevator))
		goto out_unlock;	/* an elevator is already attached */

	/*
	 * Pass false so elevator_get() won't try request_module(): this can
	 * run off async context where module loading isn't allowed.
	 */
	e = elevator_get(q, "mq-deadline", false);
	if (!e)
		goto out_unlock;	/* mq-deadline not built in; stay on "none" */

	err = blk_mq_init_sched(q, e);
	if (err)
		elevator_put(e);	/* drop the reference taken by elevator_get() */
out_unlock:
	mutex_unlock(&q->sysfs_lock);
	return err;
}


/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we