Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 17e149b8 authored by Mike Snitzer
Browse files

dm: add 'use_blk_mq' module param and expose in per-device ro sysfs attr



Request-based DM's blk-mq support defaults to off; but a user can easily
change the default using the dm_mod.use_blk_mq module/boot option.

Also, you can check what mode a given request-based DM device is using
with: cat /sys/block/dm-X/dm/use_blk_mq

This change enables further cleanup and reduced work (e.g. the
md->io_pool and md->rq_pool aren't created if using blk-mq).

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 02233342
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -37,3 +37,11 @@ Description: Allow control over how long a request that is a
		accounting.  This attribute is not applicable to
		bio-based DM devices so it will only ever report 0 for
		them.

What:		/sys/block/dm-<num>/dm/use_blk_mq
Date:		March 2015
KernelVersion:	4.1
Contact:	dm-devel@redhat.com
Description:	Request-based Device-mapper blk-mq I/O path mode.
		Contains the value 1 if the device is using blk-mq.
		Otherwise it contains 0. Read-only attribute.
+11 −0
Original line number Diff line number Diff line
@@ -196,6 +196,17 @@ config BLK_DEV_DM

	  If unsure, say N.

config DM_MQ_DEFAULT
	bool "request-based DM: use blk-mq I/O path by default"
	depends on BLK_DEV_DM
	---help---
	  This option enables the blk-mq based I/O path for request-based
	  DM devices by default.  With the option the dm_mod.use_blk_mq
	  module/boot option defaults to Y, without it to N, but it can
	  still be overridden either way.

	  If unsure say N.

config DM_DEBUG
	bool "Device mapper debugging support"
	depends on BLK_DEV_DM
+9 −0
Original line number Diff line number Diff line
@@ -89,15 +89,24 @@ static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
	return strlen(buf);
}

/*
 * Sysfs show method for /sys/block/dm-X/dm/use_blk_mq: emits "1\n" if
 * this mapped device uses the blk-mq I/O path, "0\n" otherwise, and
 * returns the number of characters written.
 */
static ssize_t dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%d\n", dm_use_blk_mq(md));
}

/* Read-only attributes exported under /sys/block/dm-X/dm/ */
static DM_ATTR_RO(name);
static DM_ATTR_RO(uuid);
static DM_ATTR_RO(suspended);
static DM_ATTR_RO(use_blk_mq);
/* Read-write; its store method ignores blk-mq devices (see dm.c) */
static DM_ATTR_RW(rq_based_seq_io_merge_deadline);

/* NULL-terminated attribute table registered with the dm sysfs kobject. */
static struct attribute *dm_attrs[] = {
	&dm_attr_name.attr,
	&dm_attr_uuid.attr,
	&dm_attr_suspended.attr,
	&dm_attr_use_blk_mq.attr,
	&dm_attr_rq_based_seq_io_merge_deadline.attr,
	NULL,
};
+3 −3
Original line number Diff line number Diff line
@@ -940,7 +940,7 @@ bool dm_table_mq_request_based(struct dm_table *t)
	return dm_table_get_type(t) == DM_TYPE_MQ_REQUEST_BASED;
}

static int dm_table_alloc_md_mempools(struct dm_table *t)
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
	unsigned type = dm_table_get_type(t);
	unsigned per_bio_data_size = 0;
@@ -958,7 +958,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t)
			per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
		}

	t->mempools = dm_alloc_md_mempools(type, t->integrity_supported, per_bio_data_size);
	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_bio_data_size);
	if (!t->mempools)
		return -ENOMEM;

@@ -1128,7 +1128,7 @@ int dm_table_complete(struct dm_table *t)
		return r;
	}

	r = dm_table_alloc_md_mempools(t);
	r = dm_table_alloc_md_mempools(t, t->md);
	if (r)
		DMERR("unable to allocate mempools");

+41 −12
Original line number Diff line number Diff line
@@ -228,8 +228,20 @@ struct mapped_device {

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set tag_set;
	bool use_blk_mq;
};

/*
 * Module-wide default for whether new request-based mapped devices use
 * the blk-mq I/O path.  CONFIG_DM_MQ_DEFAULT picks the built-in default;
 * the dm_mod.use_blk_mq module/boot parameter can override it at runtime.
 */
#ifdef CONFIG_DM_MQ_DEFAULT
static bool use_blk_mq = true;
#else
static bool use_blk_mq = false;
#endif

/*
 * Report whether this mapped device uses the blk-mq I/O path.  The flag
 * is snapshotted from the module-wide default when the device is
 * allocated (alloc_dev), so it is stable for the device's lifetime
 * unless the legacy queue init clears it.
 */
bool dm_use_blk_mq(struct mapped_device *md)
{
	return md->use_blk_mq;
}

/*
 * For mempools pre-allocation at the table loading time.
 */
@@ -2034,7 +2046,7 @@ ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
{
	unsigned deadline;

	if (!dm_request_based(md))
	if (!dm_request_based(md) || md->use_blk_mq)
		return count;

	if (kstrtouint(buf, 10, &deadline))
@@ -2222,6 +2234,7 @@ static void dm_init_md_queue(struct mapped_device *md)

static void dm_init_old_md_queue(struct mapped_device *md)
{
	md->use_blk_mq = false;
	dm_init_md_queue(md);

	/*
@@ -2263,6 +2276,7 @@ static struct mapped_device *alloc_dev(int minor)
	if (r < 0)
		goto bad_io_barrier;

	md->use_blk_mq = use_blk_mq;
	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
@@ -2349,7 +2363,6 @@ static void unlock_fs(struct mapped_device *md);
static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));
	bool using_blk_mq = !!md->queue->mq_ops;

	unlock_fs(md);
	destroy_workqueue(md->wq);
@@ -2375,7 +2388,7 @@ static void free_dev(struct mapped_device *md)
	del_gendisk(md->disk);
	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	if (using_blk_mq)
	if (md->use_blk_mq)
		blk_mq_free_tag_set(&md->tag_set);
	bdput(md->bdev);
	free_minor(minor);
@@ -2388,7 +2401,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);

	if (md->io_pool && md->bs) {
	if (md->bs) {
		/* The md already has necessary mempools. */
		if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
			/*
@@ -2798,13 +2811,21 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
	return err;
}

/*
 * Resolve a table-derived device type to the queue type this device will
 * actually use: bio-based passes through unchanged, while request-based
 * is narrowed to the legacy or blk-mq variant per md->use_blk_mq.
 */
static unsigned filter_md_type(unsigned type, struct mapped_device *md)
{
	if (type == DM_TYPE_BIO_BASED)
		return type;

	if (md->use_blk_mq)
		return DM_TYPE_MQ_REQUEST_BASED;

	return DM_TYPE_REQUEST_BASED;
}

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md)
{
	int r;
	unsigned md_type = dm_get_md_type(md);
	unsigned md_type = filter_md_type(dm_get_md_type(md), md);

	switch (md_type) {
	case DM_TYPE_REQUEST_BASED:
@@ -3509,16 +3530,19 @@ int dm_noflush_suspending(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
					    unsigned integrity, unsigned per_bio_data_size)
{
	struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
	struct kmem_cache *cachep;
	struct kmem_cache *cachep = NULL;
	unsigned int pool_size = 0;
	unsigned int front_pad;

	if (!pools)
		return NULL;

	type = filter_md_type(type, md);

	switch (type) {
	case DM_TYPE_BIO_BASED:
		cachep = _io_cache;
@@ -3526,13 +3550,13 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
		break;
	case DM_TYPE_REQUEST_BASED:
		cachep = _rq_tio_cache;
		pool_size = dm_get_reserved_rq_based_ios();
		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
		if (!pools->rq_pool)
			goto out;
		/* fall through to setup remaining rq-based pools */
	case DM_TYPE_MQ_REQUEST_BASED:
		cachep = _rq_tio_cache;
		if (!pool_size)
			pool_size = dm_get_reserved_rq_based_ios();
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
@@ -3540,12 +3564,14 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
		WARN_ON(per_bio_data_size != 0);
		break;
	default:
		goto out;
		BUG();
	}

	if (cachep) {
		pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
		if (!pools->io_pool)
			goto out;
	}

	pools->bs = bioset_create_nobvec(pool_size, front_pad);
	if (!pools->bs)
@@ -3602,6 +3628,9 @@ MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
Loading