
Commit 1b157939 authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: get rid of the cpumask in struct blk_mq_tags



Unused now that NVMe sets up irq affinity before calling into blk-mq.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent b5af7f2f
block/blk-mq-tag.c +0 −6
@@ -665,11 +665,6 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
 	if (!tags)
 		return NULL;
 
-	if (!zalloc_cpumask_var(&tags->cpumask, GFP_KERNEL)) {
-		kfree(tags);
-		return NULL;
-	}
-
 	tags->nr_tags = total_tags;
 	tags->nr_reserved_tags = reserved_tags;
 
@@ -680,7 +675,6 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
 {
 	bt_free(&tags->bitmap_tags);
 	bt_free(&tags->breserved_tags);
-	free_cpumask_var(tags->cpumask);
 	kfree(tags);
 }

block/blk-mq-tag.h +0 −1
@@ -44,7 +44,6 @@ struct blk_mq_tags {
 	struct list_head page_list;
 
 	int alloc_policy;
-	cpumask_var_t cpumask;
 };

block/blk-mq.c +21 −4
@@ -1861,7 +1861,6 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 		hctx->tags = set->tags[i];
 		WARN_ON(!hctx->tags);
 
-		cpumask_copy(hctx->tags->cpumask, hctx->cpumask);
 		/*
 		 * Set the map size to the number of mapped software queues.
 		 * This is more accurate and more efficient than looping
@@ -2272,11 +2271,29 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 	return 0;
 }
 
-struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags)
+static int blk_mq_create_mq_map(struct blk_mq_tag_set *set,
+		const struct cpumask *affinity_mask)
 {
-	return tags->cpumask;
+	int queue = -1, cpu = 0;
+
+	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
+			GFP_KERNEL, set->numa_node);
+	if (!set->mq_map)
+		return -ENOMEM;
+
+	if (!affinity_mask)
+		return 0;	/* map all cpus to queue 0 */
+
+	/* If cpus are offline, map them to first hctx */
+	for_each_online_cpu(cpu) {
+		if (cpumask_test_cpu(cpu, affinity_mask))
+			queue++;
+		if (queue >= 0)
+			set->mq_map[cpu] = queue;
+	}
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
 
 /*
  * Alloc a tag set to be associated with one or more request queues.
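To make the cpu-to-queue walk added in blk_mq_create_mq_map() concrete, here is a minimal userspace sketch of the same loop, with the cpumask modeled as a plain bitmask and every CPU assumed online (NR_CPUS and the mask value are illustrative, not kernel API):

#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
	unsigned int mq_map[NR_CPUS] = { 0 };	/* zeroed, like kzalloc_node() */
	unsigned long affinity_mask = 0x55;	/* CPUs 0, 2, 4, 6 own a vector */
	int queue = -1, cpu;

	/* A CPU present in the affinity mask opens a new queue; every
	 * following CPU shares that queue until the next hit.  CPUs
	 * before the first hit stay on queue 0 (the map is zeroed). */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (affinity_mask & (1UL << cpu))
			queue++;
		if (queue >= 0)
			mq_map[cpu] = queue;
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d -> hctx %u\n", cpu, mq_map[cpu]);
	return 0;
}

With the mask above this prints cpus 0 and 1 on hctx 0, cpus 2 and 3 on hctx 1, and so on: each CPU that owns an interrupt vector starts a queue, and vectorless CPUs piggyback on the most recently opened one.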
include/linux/blk-mq.h +0 −1
@@ -201,7 +201,6 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
 		unsigned int flags, unsigned int hctx_idx);
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
-struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
 
 enum {
 	BLK_MQ_UNIQUE_TAG_BITS = 16,