Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ee1b6f7a authored by Shaohua Li, committed by Jens Axboe
Browse files

block: support different tag allocation policy



The libata tag allocation is using a round-robin policy. Next patch will
make libata use block generic tag allocation, so let's add a policy to
tag allocation.

Currently two policies: FIFO (default) and round-robin.

Cc: Jens Axboe <axboe@fb.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent bb5c3cdd
Loading
Loading
Loading
Loading
+25 −8
Original line number Diff line number Diff line
@@ -119,7 +119,7 @@ fail:
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth)
						int depth, int alloc_policy)
{
	struct blk_queue_tag *tags;

@@ -131,6 +131,8 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
		goto fail;

	atomic_set(&tags->refcnt, 1);
	tags->alloc_policy = alloc_policy;
	tags->next_tag = 0;
	return tags;
fail:
	kfree(tags);
@@ -140,10 +142,11 @@ fail:
/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 * @alloc_policy: tag allocation policy
 **/
struct blk_queue_tag *blk_init_tags(int depth)
struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
{
	return __blk_queue_init_tags(NULL, depth);
	return __blk_queue_init_tags(NULL, depth, alloc_policy);
}
EXPORT_SYMBOL(blk_init_tags);

@@ -152,19 +155,20 @@ EXPORT_SYMBOL(blk_init_tags);
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
 * @alloc_policy: tag allocation policy
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags)
			struct blk_queue_tag *tags, int alloc_policy)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth);
		tags = __blk_queue_init_tags(q, depth, alloc_policy);

		if (!tags)
			return -ENOMEM;
@@ -344,9 +348,21 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
	}

	do {
		if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
			tag = find_first_zero_bit(bqt->tag_map, max_depth);
			if (tag >= max_depth)
				return 1;
		} else {
			int start = bqt->next_tag;
			int size = min_t(int, bqt->max_depth, max_depth + start);
			tag = find_next_zero_bit(bqt->tag_map, size, start);
			if (tag >= size && start + size > bqt->max_depth) {
				size = start + size - bqt->max_depth;
				tag = find_first_zero_bit(bqt->tag_map, size);
			}
			if (tag >= size)
				return 1;
		}

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
@@ -354,6 +370,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
	 * See blk_queue_end_tag for details.
	 */

	bqt->next_tag = (tag + 1) % bqt->max_depth;
	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
+1 −1
Original line number Diff line number Diff line
@@ -423,7 +423,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev)
	}

	/* switch queue to TCQ mode; allocate tag map */
	rc = blk_queue_init_tags(q, OSDBLK_MAX_REQ, NULL);
	rc = blk_queue_init_tags(q, OSDBLK_MAX_REQ, NULL, BLK_TAG_ALLOC_FIFO);
	if (rc) {
		blk_cleanup_queue(q);
		put_disk(disk);
+2 −1
Original line number Diff line number Diff line
@@ -290,7 +290,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
	if (!shost_use_blk_mq(sdev->host) &&
	    (shost->bqt || shost->hostt->use_blk_tags)) {
		blk_queue_init_tags(sdev->request_queue,
				    sdev->host->cmd_per_lun, shost->bqt);
				    sdev->host->cmd_per_lun, shost->bqt,
				    shost->hostt->tag_alloc_policy);
	}
	scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);

+6 −2
Original line number Diff line number Diff line
@@ -272,7 +272,11 @@ struct blk_queue_tag {
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
@@ -1139,11 +1143,11 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
+3 −0
Original line number Diff line number Diff line
@@ -402,6 +402,9 @@ struct scsi_host_template {
	 */
	unsigned char present;

	/* If use block layer to manage tags, this is tag allocation policy */
	int tag_alloc_policy;

	/*
	 * Let the block layer assigns tags to all commands.
	 */
Loading