
Commit fa72b903 authored by Tejun Heo, committed by Linus Torvalds

[PATCH] blk: remove blk_queue_tag->real_max_depth optimization



blk_queue_tag->real_max_depth was used to optimize out unnecessary
allocations/frees on tag resize.  However, the whole thing was very broken:
tag_map was never allocated to real_max_depth, resulting in access beyond the
end of the map, and bits in [max_depth..real_max_depth] were set when
initializing a map and copied when resizing, resulting in pre-occupied tags.

As the gain of the optimization is very small, almost nil, remove the
whole thing.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Acked-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 2bf0fdad
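
For illustration, a minimal user-space sketch of the sizing change (not from the patch; BITS_PER_ULONG and the ALIGN macro below are local stand-ins for the kernel's BLK_TAGS_PER_LONG and ALIGN()). The new formula rounds depth up to a whole number of words, so the map holds exactly enough bits; the old `depth / BLK_TAGS_PER_LONG + 1` formula allocated an extra word whenever depth was already a multiple of the word size.

#include <stdio.h>
#include <limits.h>

#define BITS_PER_ULONG	(sizeof(unsigned long) * CHAR_BIT)
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* word count after the patch: round depth up to a whole number of words */
static size_t new_nr_ulongs(size_t depth)
{
	return ALIGN(depth, BITS_PER_ULONG) / BITS_PER_ULONG;
}

/* word count before the patch: always one extra (possibly partial) word */
static size_t old_nr_ulongs(size_t depth)
{
	return depth / BITS_PER_ULONG + 1;
}

int main(void)
{
	printf("depth 64: old=%zu new=%zu\n", old_nr_ulongs(64), new_nr_ulongs(64));
	printf("depth 65: old=%zu new=%zu\n", old_nr_ulongs(65), new_nr_ulongs(65));
	return 0;
}

On a 64-bit machine this prints old=2 new=1 for depth 64: the ALIGN-based count allocates exactly enough, leaving no padding bits to be pre-set or copied.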
+10 −25
@@ -717,7 +717,7 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
 
-	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
+	if (unlikely(bqt == NULL || tag >= bqt->max_depth))
 		return NULL;
 
 	return bqt->tag_index[tag];
@@ -775,9 +775,9 @@ EXPORT_SYMBOL(blk_queue_free_tags);
 static int
 init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 {
-	int bits, i;
 	struct request **tag_index;
 	unsigned long *tag_map;
+	int nr_ulongs;
 
 	if (depth > q->nr_requests * 2) {
 		depth = q->nr_requests * 2;
@@ -789,24 +789,17 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 	if (!tag_index)
 		goto fail;
 
-	bits = (depth / BLK_TAGS_PER_LONG) + 1;
-	tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
+	nr_ulongs = ALIGN(depth, BLK_TAGS_PER_LONG) / BLK_TAGS_PER_LONG;
+	tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
 	if (!tag_map)
 		goto fail;
 
 	memset(tag_index, 0, depth * sizeof(struct request *));
-	memset(tag_map, 0, bits * sizeof(unsigned long));
+	memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
 	tags->max_depth = depth;
-	tags->real_max_depth = bits * BITS_PER_LONG;
 	tags->tag_index = tag_index;
 	tags->tag_map = tag_map;
 
-	/*
-	 * set the upper bits if the depth isn't a multiple of the word size
-	 */
-	for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
-		__set_bit(i, tag_map);
-
 	return 0;
 fail:
 	kfree(tag_index);
@@ -871,32 +864,24 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
 	struct blk_queue_tag *bqt = q->queue_tags;
 	struct request **tag_index;
 	unsigned long *tag_map;
-	int bits, max_depth;
+	int max_depth, nr_ulongs;
 
 	if (!bqt)
 		return -ENXIO;
 
-	/*
-	 * don't bother sizing down
-	 */
-	if (new_depth <= bqt->real_max_depth) {
-		bqt->max_depth = new_depth;
-		return 0;
-	}
-
 	/*
 	 * save the old state info, so we can copy it back
 	 */
 	tag_index = bqt->tag_index;
 	tag_map = bqt->tag_map;
-	max_depth = bqt->real_max_depth;
+	max_depth = bqt->max_depth;
 
 	if (init_tag_map(q, bqt, new_depth))
 		return -ENOMEM;
 
 	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
-	bits = max_depth / BLK_TAGS_PER_LONG;
-	memcpy(bqt->tag_map, tag_map, bits * sizeof(unsigned long));
+	nr_ulongs = ALIGN(max_depth, BLK_TAGS_PER_LONG) / BLK_TAGS_PER_LONG;
+	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
 
 	kfree(tag_index);
 	kfree(tag_map);
@@ -926,7 +911,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 
 	BUG_ON(tag == -1);
 
-	if (unlikely(tag >= bqt->real_max_depth))
+	if (unlikely(tag >= bqt->max_depth))
 		return;
 
 	if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
+0 −1
@@ -294,7 +294,6 @@ struct blk_queue_tag {
 	struct list_head busy_list;	/* fifo list of busy tags */
 	int busy;			/* current depth */
 	int max_depth;			/* what we will send to device */
-	int real_max_depth;		/* what the array can hold */
 	atomic_t refcnt;		/* map can be shared */
 };
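
The pre-occupied-tags failure described in the commit message can be reproduced in isolation. A user-space sketch, with hypothetical depths (set_bit_ul/test_bit_ul are local helpers, not the kernel's bitops): the old init_tag_map set every bit from depth to the end of the last word, and blk_queue_resize_tags copied the old map verbatim, so after growing the map those padding bits land inside the new valid range and their tags appear permanently busy.

#include <stdio.h>
#include <string.h>
#include <limits.h>

#define BITS_PER_ULONG	(sizeof(unsigned long) * CHAR_BIT)

/* local stand-ins for the kernel's __set_bit()/test_bit() */
static void set_bit_ul(unsigned long *map, size_t nr)
{
	map[nr / BITS_PER_ULONG] |= 1UL << (nr % BITS_PER_ULONG);
}

static int test_bit_ul(const unsigned long *map, size_t nr)
{
	return (map[nr / BITS_PER_ULONG] >> (nr % BITS_PER_ULONG)) & 1;
}

int main(void)
{
	/* old-style init at depth 60 (on 64-bit: 60/64 + 1 = 1 word),
	 * with the padding bits [60..63] pre-set as "busy" */
	size_t depth = 60, words = depth / BITS_PER_ULONG + 1;
	unsigned long old_map[4] = { 0 }, new_map[4] = { 0 };

	for (size_t i = depth; i < words * BITS_PER_ULONG; i++)
		set_bit_ul(old_map, i);

	/* old-style resize to depth 120: the map is copied verbatim,
	 * dragging the padding bits into the now-valid range */
	memcpy(new_map, old_map, words * sizeof(unsigned long));

	for (size_t i = 0; i < 120; i++)
		if (test_bit_ul(new_map, i))
			printf("tag %zu wrongly busy after resize\n", i);
	return 0;
}

On a 64-bit machine this reports tags 60 through 63 as busy even though nothing was ever allocated. After the patch no bits beyond max_depth are set in the first place, so there is nothing stale to copy and real_max_depth can go away entirely.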