Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f4a644db authored by Omar Sandoval, committed by Jens Axboe
Browse files

sbitmap: push alloc policy into sbitmap_queue



Again, there's no point in passing this in every time. Make it part of
struct sbitmap_queue and clean up the API.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 40aabb67
Loading
Loading
Loading
Loading
+15 −18
Original line number Original line Diff line number Diff line
@@ -91,14 +91,11 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
	return atomic_read(&hctx->nr_active) < depth;
	return atomic_read(&hctx->nr_active) < depth;
}
}


#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)

static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
		    struct blk_mq_tags *tags)
{
{
	if (!hctx_may_queue(hctx, bt))
	if (!hctx_may_queue(hctx, bt))
		return -1;
		return -1;
	return __sbitmap_queue_get(bt, BT_ALLOC_RR(tags));
	return __sbitmap_queue_get(bt);
}
}


static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
@@ -108,7 +105,7 @@ static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
	DEFINE_WAIT(wait);
	DEFINE_WAIT(wait);
	int tag;
	int tag;


	tag = __bt_get(hctx, bt, tags);
	tag = __bt_get(hctx, bt);
	if (tag != -1)
	if (tag != -1)
		return tag;
		return tag;


@@ -119,7 +116,7 @@ static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
	do {
	do {
		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);


		tag = __bt_get(hctx, bt, tags);
		tag = __bt_get(hctx, bt);
		if (tag != -1)
		if (tag != -1)
			break;
			break;


@@ -136,7 +133,7 @@ static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
		 * Retry tag allocation after running the hardware queue,
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 * as running the queue may also have found completions.
		 */
		 */
		tag = __bt_get(hctx, bt, tags);
		tag = __bt_get(hctx, bt);
		if (tag != -1)
		if (tag != -1)
			break;
			break;


@@ -206,12 +203,10 @@ void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
		const int real_tag = tag - tags->nr_reserved_tags;
		const int real_tag = tag - tags->nr_reserved_tags;


		BUG_ON(real_tag >= tags->nr_tags);
		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag,
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
				    BT_ALLOC_RR(tags), ctx->cpu);
	} else {
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag,
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
				    BT_ALLOC_RR(tags), ctx->cpu);
	}
	}
}
}


@@ -363,21 +358,23 @@ static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
	return bt->sb.depth - sbitmap_weight(&bt->sb);
	return bt->sb.depth - sbitmap_weight(&bt->sb);
}
}


static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth, int node)
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
{
	return sbitmap_queue_init_node(bt, depth, -1, GFP_KERNEL, node);
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}
}


static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
						   int node, int alloc_policy)
{
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;


	tags->alloc_policy = alloc_policy;
	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))

	if (bt_alloc(&tags->bitmap_tags, depth, node))
		goto free_tags;
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node))
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;
		goto free_bitmap_tags;


	return tags;
	return tags;
+0 −1
Original line number Original line Diff line number Diff line
@@ -18,7 +18,6 @@ struct blk_mq_tags {
	struct request **rqs;
	struct request **rqs;
	struct list_head page_list;
	struct list_head page_list;


	int alloc_policy;
	cpumask_var_t cpumask;
	cpumask_var_t cpumask;
};
};


+11 −8
Original line number Original line Diff line number Diff line
@@ -122,6 +122,11 @@ struct sbitmap_queue {
	 * @ws: Wait queues.
	 * @ws: Wait queues.
	 */
	 */
	struct sbq_wait_state *ws;
	struct sbq_wait_state *ws;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;
};
};


/**
/**
@@ -259,13 +264,14 @@ unsigned int sbitmap_weight(const struct sbitmap *sb);
 * @sbq: Bitmap queue to initialize.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_get().
 * @flags: Allocation flags.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 * @node: Memory node to allocate on.
 *
 *
 * Return: Zero on success or negative errno on failure.
 * Return: Zero on success or negative errno on failure.
 */
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, gfp_t flags, int node);
			    int shift, bool round_robin, gfp_t flags, int node);


/**
/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
@@ -294,29 +300,27 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @sbq: Bitmap queue to allocate from.
 * @round_robin: See sbitmap_get().
 *
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq, bool round_robin);
int __sbitmap_queue_get(struct sbitmap_queue *sbq);


/**
/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @sbq: Bitmap queue to allocate from.
 * @round_robin: See sbitmap_get().
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 *       sbitmap_queue_clear()).
 *
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq, bool round_robin,
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
				    unsigned int *cpu)
				    unsigned int *cpu)
{
{
	int nr;
	int nr;


	*cpu = get_cpu();
	*cpu = get_cpu();
	nr = __sbitmap_queue_get(sbq, round_robin);
	nr = __sbitmap_queue_get(sbq);
	put_cpu();
	put_cpu();
	return nr;
	return nr;
}
}
@@ -326,11 +330,10 @@ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq, bool round_robin,
 * &struct sbitmap_queue.
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @sbq: Bitmap to free from.
 * @nr: Bit number to free.
 * @nr: Bit number to free.
 * @round_robin: See sbitmap_get().
 * @cpu: CPU the bit was allocated on.
 * @cpu: CPU the bit was allocated on.
 */
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 bool round_robin, unsigned int cpu);
			 unsigned int cpu);


static inline int sbq_index_inc(int index)
static inline int sbq_index_inc(int index)
{
{
+8 −6
Original line number Original line Diff line number Diff line
@@ -196,7 +196,7 @@ static unsigned int sbq_calc_wake_batch(unsigned int depth)
}
}


int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, gfp_t flags, int node)
			    int shift, bool round_robin, gfp_t flags, int node)
{
{
	int ret;
	int ret;
	int i;
	int i;
@@ -225,6 +225,8 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
		init_waitqueue_head(&sbq->ws[i].wait);
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}
	}

	sbq->round_robin = round_robin;
	return 0;
	return 0;
}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
@@ -236,18 +238,18 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);


int __sbitmap_queue_get(struct sbitmap_queue *sbq, bool round_robin)
int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
{
	unsigned int hint;
	unsigned int hint;
	int nr;
	int nr;


	hint = this_cpu_read(*sbq->alloc_hint);
	hint = this_cpu_read(*sbq->alloc_hint);
	nr = sbitmap_get(&sbq->sb, hint, round_robin);
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);


	if (nr == -1) {
	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(round_robin)) {
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		/* Only update the hint if we used it. */
		hint = nr + 1;
		hint = nr + 1;
		if (hint >= sbq->sb.depth - 1)
		if (hint >= sbq->sb.depth - 1)
@@ -304,11 +306,11 @@ static void sbq_wake_up(struct sbitmap_queue *sbq)
}
}


void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 bool round_robin, unsigned int cpu)
			 unsigned int cpu)
{
{
	sbitmap_clear_bit(&sbq->sb, nr);
	sbitmap_clear_bit(&sbq->sb, nr);
	sbq_wake_up(sbq);
	sbq_wake_up(sbq);
	if (likely(!round_robin))
	if (likely(!sbq->round_robin))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);