
Commit 40aabb67 authored by Omar Sandoval, committed by Jens Axboe

sbitmap: push per-cpu last_tag into sbitmap_queue



Allocating your own per-cpu allocation hint separately makes for an
awkward API. Instead, allocate the per-cpu hint as part of the struct
sbitmap_queue. There's no point in a struct sbitmap_queue without the
cache, but you can still use a bare struct sbitmap.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 48e28166
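
For reference, a minimal sketch of how the reworked allocate/free pairing is meant to be used, based only on the prototypes this patch adds to the sbitmap header below. The setup and names (sbq, round_robin, the -EBUSY fallback) are illustrative, not taken from the patch:

	struct sbitmap_queue sbq;	/* initialized elsewhere, e.g. sbitmap_queue_init_node() */
	bool round_robin = false;	/* allocation policy chosen by the caller */
	unsigned int cpu;
	int nr;

	/* Try to grab a free bit; the CPU the allocation ran on is reported via @cpu. */
	nr = sbitmap_queue_get(&sbq, round_robin, &cpu);
	if (nr < 0)
		return -EBUSY;		/* no bits free right now */

	/* ... use bit number 'nr' as the tag ... */

	/*
	 * Return the bit, passing the CPU back so the per-cpu alloc_hint that now
	 * lives inside the sbitmap_queue can be updated.
	 */
	sbitmap_queue_clear(&sbq, nr, round_robin, cpu);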
block/blk-mq-tag.c (+17 −36)
@@ -94,39 +94,21 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 #define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)
 
 static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
-		    unsigned int *tag_cache, struct blk_mq_tags *tags)
+		    struct blk_mq_tags *tags)
 {
-	unsigned int last_tag;
-	int tag;
-
 	if (!hctx_may_queue(hctx, bt))
 		return -1;
-
-	last_tag = *tag_cache;
-	tag = sbitmap_get(&bt->sb, last_tag, BT_ALLOC_RR(tags));
-
-	if (tag == -1) {
-		*tag_cache = 0;
-	} else if (tag == last_tag || unlikely(BT_ALLOC_RR(tags))) {
-		last_tag = tag + 1;
-		if (last_tag >= bt->sb.depth - 1)
-			last_tag = 0;
-		*tag_cache = last_tag;
-	}
-
-	return tag;
+	return __sbitmap_queue_get(bt, BT_ALLOC_RR(tags));
 }
 
-static int bt_get(struct blk_mq_alloc_data *data,
-		  struct sbitmap_queue *bt,
-		  struct blk_mq_hw_ctx *hctx,
-		  unsigned int *last_tag, struct blk_mq_tags *tags)
+static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
+		  struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags)
 {
 	struct sbq_wait_state *ws;
 	DEFINE_WAIT(wait);
 	int tag;
 
-	tag = __bt_get(hctx, bt, last_tag, tags);
+	tag = __bt_get(hctx, bt, tags);
 	if (tag != -1)
 		return tag;
 
@@ -137,7 +119,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
 	do {
 		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
 
-		tag = __bt_get(hctx, bt, last_tag, tags);
+		tag = __bt_get(hctx, bt, tags);
 		if (tag != -1)
 			break;
 
@@ -154,7 +136,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
 		 * Retry tag allocation after running the hardware queue,
 		 * as running the queue may also have found completions.
 		 */
-		tag = __bt_get(hctx, bt, last_tag, tags);
+		tag = __bt_get(hctx, bt, tags);
 		if (tag != -1)
 			break;
 
@@ -168,7 +150,6 @@ static int bt_get(struct blk_mq_alloc_data *data,
 		if (data->flags & BLK_MQ_REQ_RESERVED) {
 			bt = &data->hctx->tags->breserved_tags;
 		} else {
-			last_tag = &data->ctx->last_tag;
 			hctx = data->hctx;
 			bt = &hctx->tags->bitmap_tags;
 		}
@@ -185,7 +166,7 @@ static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
 	int tag;
 
 	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
-			&data->ctx->last_tag, data->hctx->tags);
+		     data->hctx->tags);
 	if (tag >= 0)
 		return tag + data->hctx->tags->nr_reserved_tags;
 
@@ -194,14 +175,14 @@ static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
 
 static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
 {
-	int tag, zero = 0;
+	int tag;
 
 	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
 		WARN_ON_ONCE(1);
 		return BLK_MQ_TAG_FAIL;
 	}
 
-	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
+	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL,
 		     data->hctx->tags);
 	if (tag < 0)
 		return BLK_MQ_TAG_FAIL;
@@ -216,8 +197,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 	return __blk_mq_get_tag(data);
 }
 
-void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
-		    unsigned int *last_tag)
+void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
+		    unsigned int tag)
 {
 	struct blk_mq_tags *tags = hctx->tags;
 
@@ -225,12 +206,12 @@ void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
 		const int real_tag = tag - tags->nr_reserved_tags;
 
 		BUG_ON(real_tag >= tags->nr_tags);
-		sbitmap_queue_clear(&tags->bitmap_tags, real_tag);
-		if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
-			*last_tag = real_tag;
+		sbitmap_queue_clear(&tags->bitmap_tags, real_tag,
+				    BT_ALLOC_RR(tags), ctx->cpu);
 	} else {
 		BUG_ON(tag >= tags->nr_reserved_tags);
-		sbitmap_queue_clear(&tags->breserved_tags, tag);
+		sbitmap_queue_clear(&tags->breserved_tags, tag,
+				    BT_ALLOC_RR(tags), ctx->cpu);
 	}
 }
 
block/blk-mq-tag.h (+2 −1)
@@ -27,7 +27,8 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int r
 extern void blk_mq_free_tags(struct blk_mq_tags *tags);
 
 extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
-extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);
+extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
+			   unsigned int tag);
 extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
block/blk-mq.c (+1 −1)
@@ -303,7 +303,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 	rq->cmd_flags = 0;
 
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
-	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
+	blk_mq_put_tag(hctx, ctx, tag);
 	blk_queue_exit(q);
 }
 
block/blk-mq.h (+0 −2)
@@ -12,8 +12,6 @@ struct blk_mq_ctx {
 	unsigned int		cpu;
 	unsigned int		index_hw;
 
-	unsigned int		last_tag ____cacheline_aligned_in_smp;
-
 	/* incremented at dispatch time */
 	unsigned long		rq_dispatched[2];
 	unsigned long		rq_merged;
include/linux/sbitmap.h (+44 −1)
@@ -99,6 +99,14 @@ struct sbitmap_queue {
 	 */
 	struct sbitmap sb;
 
+	/*
+	 * @alloc_hint: Cache of last successfully allocated or freed bit.
+	 *
+	 * This is per-cpu, which allows multiple users to stick to different
+	 * cachelines until the map is exhausted.
+	 */
+	unsigned int __percpu *alloc_hint;
+
 	/**
 	 * @wake_batch: Number of bits which must be freed before we wake up any
 	 * waiters.
@@ -267,6 +275,7 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
 static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
 {
 	kfree(sbq->ws);
+	free_percpu(sbq->alloc_hint);
 	sbitmap_free(&sbq->sb);
 }
 
@@ -281,13 +290,47 @@ static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
  */
 void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
 
+/**
+ * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
+ * sbitmap_queue with preemption already disabled.
+ * @sbq: Bitmap queue to allocate from.
+ * @round_robin: See sbitmap_get().
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+int __sbitmap_queue_get(struct sbitmap_queue *sbq, bool round_robin);
+
+/**
+ * sbitmap_queue_get() - Try to allocate a free bit from a &struct
+ * sbitmap_queue.
+ * @sbq: Bitmap queue to allocate from.
+ * @round_robin: See sbitmap_get().
+ * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
+ *       sbitmap_queue_clear()).
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+static inline int sbitmap_queue_get(struct sbitmap_queue *sbq, bool round_robin,
+				    unsigned int *cpu)
+{
+	int nr;
+
+	*cpu = get_cpu();
+	nr = __sbitmap_queue_get(sbq, round_robin);
+	put_cpu();
+	return nr;
+}
+
 /**
  * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
  * &struct sbitmap_queue.
  * @sbq: Bitmap to free from.
  * @nr: Bit number to free.
+ * @round_robin: See sbitmap_get().
+ * @cpu: CPU the bit was allocated on.
  */
-void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr);
+void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
+			 bool round_robin, unsigned int cpu);
 
 static inline int sbq_index_inc(int index)
 {