
Commit ed76e329 authored by Jens Axboe

blk-mq: abstract out queue map

This is in preparation for allowing multiple sets of maps per
queue, if so desired.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a8908939
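
The hunks below replace direct use of set->mq_map and set->nr_hw_queues with a small, self-contained map structure. A minimal sketch of that structure, as implied by the qmap->mq_map and qmap->nr_queues accesses in the diff (the full definition lands in include/linux/blk-mq.h, which is not excerpted here):

struct blk_mq_queue_map {
	unsigned int *mq_map;		/* per-CPU array: CPU -> hw queue index */
	unsigned int nr_queues;		/* hw queues covered by this map */
};

The tag set then carries an array of these maps (hence the set->map[0] references below), which is what leaves room for multiple sets of maps per queue later.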
block/blk-mq-cpumap.c  +5 −5
@@ -30,10 +30,10 @@ static int get_first_sibling(unsigned int cpu)
 	return cpu;
 }
 
-int blk_mq_map_queues(struct blk_mq_tag_set *set)
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
 {
-	unsigned int *map = set->mq_map;
-	unsigned int nr_queues = set->nr_hw_queues;
+	unsigned int *map = qmap->mq_map;
+	unsigned int nr_queues = qmap->nr_queues;
 	unsigned int cpu, first_sibling;
 
 	for_each_possible_cpu(cpu) {
@@ -62,12 +62,12 @@ EXPORT_SYMBOL_GPL(blk_mq_map_queues);
  * We have no quick way of doing reverse lookups. This is only used at
  * queue init time, so runtime isn't important.
  */
-int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
+int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
 {
 	int i;
 
 	for_each_possible_cpu(i) {
-		if (index == mq_map[i])
+		if (index == qmap->mq_map[i])
 			return local_memory_node(cpu_to_node(i));
 	}
 
block/blk-mq-pci.c  +5 −5
@@ -31,26 +31,26 @@
  * that maps a queue to the CPUs that have irq affinity for the corresponding
  * vector.
  */
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
 			    int offset)
 {
 	const struct cpumask *mask;
 	unsigned int queue, cpu;
 
-	for (queue = 0; queue < set->nr_hw_queues; queue++) {
+	for (queue = 0; queue < qmap->nr_queues; queue++) {
 		mask = pci_irq_get_affinity(pdev, queue + offset);
 		if (!mask)
 			goto fallback;
 
 		for_each_cpu(cpu, mask)
-			set->mq_map[cpu] = queue;
+			qmap->mq_map[cpu] = queue;
 	}
 
 	return 0;
 
 fallback:
-	WARN_ON_ONCE(set->nr_hw_queues > 1);
-	blk_mq_clear_mq_map(set);
+	WARN_ON_ONCE(qmap->nr_queues > 1);
+	blk_mq_clear_mq_map(qmap);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
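
The fallback path above now hands blk_mq_clear_mq_map() a queue map instead of the tag set. The helper itself is not among the excerpted hunks; after this change it plausibly reduces to a per-CPU reset along these lines (a sketch, not verbatim kernel source):

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	unsigned int cpu;

	/* point every CPU at hw queue 0 so no stale mapping survives */
	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}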
block/blk-mq-rdma.c  +2 −2
@@ -41,12 +41,12 @@ int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
 			goto fallback;
 
 		for_each_cpu(cpu, mask)
-			set->mq_map[cpu] = queue;
+			set->map[0].mq_map[cpu] = queue;
 	}
 
 	return 0;
 
 fallback:
-	return blk_mq_map_queues(set);
+	return blk_mq_map_queues(&set->map[0]);
 }
 EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
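
Note the asymmetry with the PCI and virtio helpers: blk_mq_rdma_map_queues() still takes the whole tag set and indexes map[0] internally, whereas the others now take the map directly. A driver's ->map_queues callback would therefore pass the first map explicitly, roughly like this (hypothetical foo_* driver, for illustration only):

static int foo_map_queues(struct blk_mq_tag_set *set)
{
	struct foo_dev *foo = set->driver_data;	/* hypothetical driver data */

	/* spread hw queues over CPUs using the device's PCI irq affinity */
	return blk_mq_pci_map_queues(&set->map[0], foo->pdev, 0);
}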
block/blk-mq-virtio.c  +4 −4
@@ -29,7 +29,7 @@
  * that maps a queue to the CPUs that have irq affinity for the corresponding
  * vector.
  */
-int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
 		struct virtio_device *vdev, int first_vec)
 {
 	const struct cpumask *mask;
@@ -38,17 +38,17 @@ int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
 	if (!vdev->config->get_vq_affinity)
 		goto fallback;
 
-	for (queue = 0; queue < set->nr_hw_queues; queue++) {
+	for (queue = 0; queue < qmap->nr_queues; queue++) {
 		mask = vdev->config->get_vq_affinity(vdev, first_vec + queue);
 		if (!mask)
 			goto fallback;
 
 		for_each_cpu(cpu, mask)
-			set->mq_map[cpu] = queue;
+			qmap->mq_map[cpu] = queue;
 	}
 
 	return 0;
 fallback:
-	return blk_mq_map_queues(set);
+	return blk_mq_map_queues(qmap);
 }
 EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues);
block/blk-mq.c  +18 −16
@@ -1975,7 +1975,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
 	struct blk_mq_tags *tags;
 	int node;
 
-	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+	node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
 	if (node == NUMA_NO_NODE)
 		node = set->numa_node;
 
@@ -2031,7 +2031,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 	size_t rq_size, left;
 	int node;
 
-	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+	node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
 	if (node == NUMA_NO_NODE)
 		node = set->numa_node;
 
@@ -2322,7 +2322,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
 	for_each_possible_cpu(i) {
-		hctx_idx = set->mq_map[i];
+		hctx_idx = set->map[0].mq_map[i];
 		/* unmapped hw queue can be remapped after CPU topo changed */
 		if (!set->tags[hctx_idx] &&
 		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
@@ -2332,7 +2332,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 			 * case, remap the current ctx to hctx[0] which
 			 * is guaranteed to always have tags allocated
 			 */
-			set->mq_map[i] = 0;
+			set->map[0].mq_map[i] = 0;
 		}
 
 		ctx = per_cpu_ptr(q->queue_ctx, i);
@@ -2585,7 +2585,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		int node;
 		struct blk_mq_hw_ctx *hctx;
 
-		node = blk_mq_hw_queue_to_node(set->mq_map, i);
+		node = blk_mq_hw_queue_to_node(&set->map[0], i);
 		/*
 		 * If the hw queue has been mapped to another numa node,
 		 * we need to realloc the hctx. If allocation fails, fallback
@@ -2791,18 +2791,18 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
 		 * 	mask = get_cpu_mask(queue)
 		 * 	for_each_cpu(cpu, mask)
-		 * 		set->mq_map[cpu] = queue;
+		 * 		set->map.mq_map[cpu] = queue;
 		 * }
 		 *
 		 * When we need to remap, the table has to be cleared for
 		 * killing stale mapping since one CPU may not be mapped
 		 * to any hw queue.
 		 */
-		blk_mq_clear_mq_map(set);
+		blk_mq_clear_mq_map(&set->map[0]);
 
 		return set->ops->map_queues(set);
 	} else
-		return blk_mq_map_queues(set);
+		return blk_mq_map_queues(&set->map[0]);
 }
 
 /*
@@ -2857,10 +2857,12 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 		return -ENOMEM;
 
 	ret = -ENOMEM;
-	set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
-				   GFP_KERNEL, set->numa_node);
-	if (!set->mq_map)
+	set->map[0].mq_map = kcalloc_node(nr_cpu_ids,
+					  sizeof(*set->map[0].mq_map),
+					  GFP_KERNEL, set->numa_node);
+	if (!set->map[0].mq_map)
 		goto out_free_tags;
+	set->map[0].nr_queues = set->nr_hw_queues;
 
 	ret = blk_mq_update_queue_map(set);
 	if (ret)
@@ -2876,8 +2878,8 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	return 0;
 
 out_free_mq_map:
-	kfree(set->mq_map);
-	set->mq_map = NULL;
+	kfree(set->map[0].mq_map);
+	set->map[0].mq_map = NULL;
 out_free_tags:
 	kfree(set->tags);
 	set->tags = NULL;
@@ -2892,8 +2894,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 	for (i = 0; i < nr_cpu_ids; i++)
 		blk_mq_free_map_and_requests(set, i);
 
-	kfree(set->mq_map);
-	set->mq_map = NULL;
+	kfree(set->map[0].mq_map);
+	set->map[0].mq_map = NULL;
 
 	kfree(set->tags);
 	set->tags = NULL;
@@ -3054,7 +3056,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 			pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
 					nr_hw_queues, prev_nr_hw_queues);
 			set->nr_hw_queues = prev_nr_hw_queues;
-			blk_mq_map_queues(set);
+			blk_mq_map_queues(&set->map[0]);
 			goto fallback;
 		}
 		blk_mq_map_swqueue(q);