Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit da695ba2 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Jens Axboe
Browse files

blk-mq: allow the driver to pass in a queue mapping



This allows drivers to specify their own queue mapping by overriding the
setup-time function that builds the mq_map.  This can be used for
example to build the map based on the MSI-X vector mapping provided
by the core interrupt layer for PCI devices.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 7d7e0f90
Loading
Loading
Loading
Loading
+5 −20
Original line number Diff line number Diff line
@@ -31,14 +31,16 @@ static int get_first_sibling(unsigned int cpu)
	return cpu;
}

int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
			    const struct cpumask *online_mask)
int blk_mq_map_queues(struct blk_mq_tag_set *set)
{
	unsigned int *map = set->mq_map;
	unsigned int nr_queues = set->nr_hw_queues;
	const struct cpumask *online_mask = cpu_online_mask;
	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
	cpumask_var_t cpus;

	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
		return 1;
		return -ENOMEM;

	cpumask_clear(cpus);
	nr_cpus = nr_uniq_cpus = 0;
@@ -86,23 +88,6 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
	return 0;
}

/*
 * Allocate and populate the per-CPU -> hardware-queue index map for @set.
 *
 * Allocates a zeroed table with one entry per possible CPU (nr_cpu_ids)
 * on @set's NUMA node, then fills it via blk_mq_update_queue_map() using
 * set->nr_hw_queues and the current cpu_online_mask.
 *
 * Returns the populated map on success; NULL on allocation or mapping
 * failure. Caller owns the returned map and must kfree() it.
 */
unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
{
	unsigned int *map;

	/* If cpus are offline, map them to first hctx */
	map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL,
				set->numa_node);
	if (!map)
		return NULL;

	/* blk_mq_update_queue_map() returns 0 on success, non-zero on failure */
	if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
		return map;

	/* Mapping failed: release the table and signal failure to the caller. */
	kfree(map);
	return NULL;
}

/*
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
+15 −3
Original line number Diff line number Diff line
@@ -2286,6 +2286,8 @@ EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	int ret;

	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
@@ -2324,11 +2326,21 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
	if (!set->tags)
		return -ENOMEM;

	set->mq_map = blk_mq_make_queue_map(set);
	ret = -ENOMEM;
	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
			GFP_KERNEL, set->numa_node);
	if (!set->mq_map)
		goto out_free_tags;

	if (blk_mq_alloc_rq_maps(set))
	if (set->ops->map_queues)
		ret = set->ops->map_queues(set);
	else
		ret = blk_mq_map_queues(set);
	if (ret)
		goto out_free_mq_map;

	ret = blk_mq_alloc_rq_maps(set);
	if (ret)
		goto out_free_mq_map;

	mutex_init(&set->tag_list_lock);
@@ -2342,7 +2354,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
out_free_tags:
	kfree(set->tags);
	set->tags = NULL;
	return -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);

+1 −3
Original line number Diff line number Diff line
@@ -47,9 +47,7 @@ void blk_mq_disable_hotplug(void);
/*
 * CPU -> queue mappings
 */
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
				   const struct cpumask *online_mask);
int blk_mq_map_queues(struct blk_mq_tag_set *set);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+3 −0
Original line number Diff line number Diff line
@@ -104,6 +104,7 @@ typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);


struct blk_mq_ops {
@@ -144,6 +145,8 @@ struct blk_mq_ops {
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
	reinit_request_fn	*reinit_request;

	map_queues_fn		*map_queues;
};

enum {