Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7e0d574f authored by Bart Van Assche, committed by Mike Snitzer
Browse files

dm: introduce enum dm_queue_mode to cleanup related code



Introduce an enumeration type for the queue mode.  This patch does
not change any functionality but makes the DM code easier to read.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent b194679f
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -47,7 +47,7 @@ struct mapped_device {
	struct request_queue *queue;
	int numa_node_id;

	unsigned type;
	enum dm_queue_mode type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

+1 −1
Original line number Diff line number Diff line
@@ -1260,7 +1260,7 @@ static int populate_table(struct dm_table *table,
	return dm_table_complete(table);
}

static bool is_valid_type(unsigned cur, unsigned new)
static bool is_valid_type(enum dm_queue_mode cur, enum dm_queue_mode new)
{
	if (cur == new ||
	    (cur == DM_TYPE_BIO_BASED && new == DM_TYPE_DAX_BIO_BASED))
+4 −1
Original line number Diff line number Diff line
@@ -90,7 +90,7 @@ struct multipath {
	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
	atomic_t pg_init_count;		/* Number of times pg_init called */

	unsigned queue_mode;
	enum dm_queue_mode queue_mode;

	struct mutex work_mutex;
	struct work_struct trigger_event;
@@ -1700,6 +1700,9 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
			case DM_TYPE_MQ_REQUEST_BASED:
				DMEMIT("queue_mode mq ");
				break;
			default:
				WARN_ON_ONCE(true);
				break;
			}
		}
	}
+7 −7
Original line number Diff line number Diff line
@@ -30,7 +30,7 @@

struct dm_table {
	struct mapped_device *md;
	unsigned type;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
@@ -825,19 +825,19 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
}
EXPORT_SYMBOL(dm_consume_args);

static bool __table_type_bio_based(unsigned table_type)
static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
	return (table_type == DM_TYPE_BIO_BASED ||
		table_type == DM_TYPE_DAX_BIO_BASED);
}

static bool __table_type_request_based(unsigned table_type)
static bool __table_type_request_based(enum dm_queue_mode table_type)
{
	return (table_type == DM_TYPE_REQUEST_BASED ||
		table_type == DM_TYPE_MQ_REQUEST_BASED);
}

void dm_table_set_type(struct dm_table *t, unsigned type)
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
{
	t->type = type;
}
@@ -879,7 +879,7 @@ static int dm_table_determine_type(struct dm_table *t)
	struct dm_target *tgt;
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	unsigned live_md_type = dm_get_md_type(t->md);
	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);

	if (t->type != DM_TYPE_NONE) {
		/* target already set the table's type */
@@ -988,7 +988,7 @@ static int dm_table_determine_type(struct dm_table *t)
	return 0;
}

unsigned dm_table_get_type(struct dm_table *t)
enum dm_queue_mode dm_table_get_type(struct dm_table *t)
{
	return t->type;
}
@@ -1039,7 +1039,7 @@ bool dm_table_all_blk_mq_devices(struct dm_table *t)

static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
	unsigned type = dm_table_get_type(t);
	enum dm_queue_mode type = dm_table_get_type(t);
	unsigned per_io_data_size = 0;
	struct dm_target *tgt;
	unsigned i;
+7 −4
Original line number Diff line number Diff line
@@ -1807,13 +1807,13 @@ void dm_unlock_md_type(struct mapped_device *md)
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, unsigned type)
void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

unsigned dm_get_md_type(struct mapped_device *md)
enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
	return md->type;
}
@@ -1840,7 +1840,7 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
	int r;
	unsigned type = dm_get_md_type(md);
	enum dm_queue_mode type = dm_get_md_type(md);

	switch (type) {
	case DM_TYPE_REQUEST_BASED:
@@ -1871,6 +1871,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
		if (type == DM_TYPE_DAX_BIO_BASED)
			queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
		break;
	case DM_TYPE_NONE:
		WARN_ON_ONCE(true);
		break;
	}

	return 0;
@@ -2556,7 +2559,7 @@ int dm_noflush_suspending(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_io_data_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
Loading