
Commit 82f402fe authored by Jens Axboe

null_blk: add support for shared tags



Some storage drivers need to share tag sets between devices. It's
useful to be able to model that with null_blk, to find hangs or
performance issues.

Add a 'shared_tags' bool module parameter. If it is set to true and
nr_devices is greater than 1, all allocated devices will share the
same tag set.
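
For example (a hedged usage sketch: the parameter names come from this
patch, and the driver is assumed to run in its default blk-mq
queue_mode):

    modprobe null_blk nr_devices=2 shared_tags=1

should register two null_blk devices backed by the single, statically
allocated tag set instead of one tag set per device.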

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent edf064e7
+70 −42
@@ -35,7 +35,8 @@ struct nullb {
 	struct request_queue *q;
 	struct gendisk *disk;
 	struct nvm_dev *ndev;
-	struct blk_mq_tag_set tag_set;
+	struct blk_mq_tag_set *tag_set;
+	struct blk_mq_tag_set __tag_set;
 	struct hrtimer timer;
 	unsigned int queue_depth;
 	spinlock_t lock;
@@ -50,6 +51,7 @@ static struct mutex lock;
 static int null_major;
 static int nullb_indexes;
 static struct kmem_cache *ppa_cache;
+static struct blk_mq_tag_set tag_set;
 
 enum {
 	NULL_IRQ_NONE		= 0,
@@ -109,7 +111,7 @@ static int bs = 512;
 module_param(bs, int, S_IRUGO);
 MODULE_PARM_DESC(bs, "Block size (in bytes)");
 
-static int nr_devices = 2;
+static int nr_devices = 1;
 module_param(nr_devices, int, S_IRUGO);
 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
 
@@ -121,6 +123,10 @@ static bool blocking;
 module_param(blocking, bool, S_IRUGO);
 MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
 
+static bool shared_tags;
+module_param(shared_tags, bool, S_IRUGO);
+MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
+
 static int irqmode = NULL_IRQ_SOFTIRQ;
 
 static int null_set_irqmode(const char *str, const struct kernel_param *kp)
@@ -376,31 +382,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
-static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
-{
-	BUG_ON(!nullb);
-	BUG_ON(!nq);
-
-	init_waitqueue_head(&nq->wait);
-	nq->queue_depth = nullb->queue_depth;
-}
-
-static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-			  unsigned int index)
-{
-	struct nullb *nullb = data;
-	struct nullb_queue *nq = &nullb->queues[index];
-
-	hctx->driver_data = nq;
-	null_init_queue(nullb, nq);
-	nullb->nr_queues++;
-
-	return 0;
-}
-
 static const struct blk_mq_ops null_mq_ops = {
 	.queue_rq       = null_queue_rq,
-	.init_hctx	= null_init_hctx,
 	.complete	= null_softirq_done_fn,
 };
 
@@ -592,8 +575,8 @@ static void null_del_dev(struct nullb *nullb)
 	else
 		del_gendisk(nullb->disk);
 	blk_cleanup_queue(nullb->q);
-	if (queue_mode == NULL_Q_MQ)
-		blk_mq_free_tag_set(&nullb->tag_set);
+	if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+		blk_mq_free_tag_set(nullb->tag_set);
 	if (!use_lightnvm)
 		put_disk(nullb->disk);
 	cleanup_queues(nullb);
@@ -615,6 +598,32 @@ static const struct block_device_operations null_fops = {
 	.release =	null_release,
 };
 
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+	BUG_ON(!nullb);
+	BUG_ON(!nq);
+
+	init_waitqueue_head(&nq->wait);
+	nq->queue_depth = nullb->queue_depth;
+}
+
+static void null_init_queues(struct nullb *nullb)
+{
+	struct request_queue *q = nullb->q;
+	struct blk_mq_hw_ctx *hctx;
+	struct nullb_queue *nq;
+	int i;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (!hctx->nr_ctx || !hctx->tags)
+			continue;
+		nq = &nullb->queues[i];
+		hctx->driver_data = nq;
+		null_init_queue(nullb, nq);
+		nullb->nr_queues++;
+	}
+}
+
 static int setup_commands(struct nullb_queue *nq)
 {
 	struct nullb_cmd *cmd;
@@ -695,6 +704,22 @@ static int null_gendisk_register(struct nullb *nullb)
 	return 0;
 }
 
+static int null_init_tag_set(struct blk_mq_tag_set *set)
+{
+	set->ops = &null_mq_ops;
+	set->nr_hw_queues = submit_queues;
+	set->queue_depth = hw_queue_depth;
+	set->numa_node = home_node;
+	set->cmd_size	= sizeof(struct nullb_cmd);
+	set->flags = BLK_MQ_F_SHOULD_MERGE;
+	set->driver_data = NULL;
+
+	if (blocking)
+		set->flags |= BLK_MQ_F_BLOCKING;
+
+	return blk_mq_alloc_tag_set(set);
+}
+
 static int null_add_dev(void)
 {
 	struct nullb *nullb;
@@ -716,26 +741,23 @@ static int null_add_dev(void)
 		goto out_free_nullb;
 
 	if (queue_mode == NULL_Q_MQ) {
-		nullb->tag_set.ops = &null_mq_ops;
-		nullb->tag_set.nr_hw_queues = submit_queues;
-		nullb->tag_set.queue_depth = hw_queue_depth;
-		nullb->tag_set.numa_node = home_node;
-		nullb->tag_set.cmd_size	= sizeof(struct nullb_cmd);
-		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-		nullb->tag_set.driver_data = nullb;
-
-		if (blocking)
-			nullb->tag_set.flags |= BLK_MQ_F_BLOCKING;
+		if (shared_tags) {
+			nullb->tag_set = &tag_set;
+			rv = 0;
+		} else {
+			nullb->tag_set = &nullb->__tag_set;
+			rv = null_init_tag_set(nullb->tag_set);
+		}
 
-		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
 		if (rv)
 			goto out_cleanup_queues;
 
-		nullb->q = blk_mq_init_queue(&nullb->tag_set);
+		nullb->q = blk_mq_init_queue(nullb->tag_set);
 		if (IS_ERR(nullb->q)) {
 			rv = -ENOMEM;
 			goto out_cleanup_tags;
 		}
+		null_init_queues(nullb);
 	} else if (queue_mode == NULL_Q_BIO) {
 		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
 		if (!nullb->q) {
@@ -788,8 +810,8 @@ static int null_add_dev(void)
 out_cleanup_blk_queue:
 	blk_cleanup_queue(nullb->q);
 out_cleanup_tags:
-	if (queue_mode == NULL_Q_MQ)
-		blk_mq_free_tag_set(&nullb->tag_set);
+	if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+		blk_mq_free_tag_set(nullb->tag_set);
 out_cleanup_queues:
 	cleanup_queues(nullb);
 out_free_nullb:
@@ -822,6 +844,9 @@ static int __init null_init(void)
 		queue_mode = NULL_Q_MQ;
 	}
 
+	if (queue_mode == NULL_Q_MQ && shared_tags)
+		null_init_tag_set(&tag_set);
+
 	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
 		if (submit_queues < nr_online_nodes) {
 			pr_warn("null_blk: submit_queues param is set to %u.",
@@ -882,6 +907,9 @@ static void __exit null_exit(void)
 	}
 	mutex_unlock(&lock);
 
+	if (queue_mode == NULL_Q_MQ && shared_tags)
+		blk_mq_free_tag_set(&tag_set);
+
 	kmem_cache_destroy(ppa_cache);
 }