
Commit 2577d67a authored by Mike Snitzer, committed by Greg Kroah-Hartman

dm: remove special-casing of bio-based immutable singleton target on NVMe



Commit 9c37de297f6590937f95a28bec1b7ac68a38618f upstream.

There is no benefit to DM special-casing NVMe. Remove all code used to
establish DM_TYPE_NVME_BIO_BASED.

Also, remove 3 'struct mapped_device *md' variables in __map_bio() which
masked the same variable that is available within __map_bio()'s scope.
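
For illustration only (this sketch is not part of the patch or the kernel tree; all names in it are hypothetical): in C, a declaration in an inner block shadows, or "masks", an outer variable of the same name, so the three block-local 'md' declarations removed below were redundant copies of the 'md' already visible throughout __map_bio():

#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct mapped_device. */
struct mapped_device {
	int swap_bios;
};

static void map_bio_like(struct mapped_device *io_md)
{
	struct mapped_device *md = io_md;	/* function scope, like __map_bio()'s md */

	{
		/* Before the patch: a block-local 'md' masked the outer one. */
		struct mapped_device *md = io_md;
		printf("inner md->swap_bios = %d\n", md->swap_bios);
	}

	/* After the patch: every use resolves to the single function-scope 'md'. */
	printf("outer md->swap_bios = %d\n", md->swap_bios);
}

int main(void)
{
	struct mapped_device dev = { .swap_bios = 2 };
	map_bio_like(&dev);
	return 0;
}

Here both printf calls happen to print the same value because both copies alias the same pointer, but the masking pattern invites bugs as soon as an inner declaration diverges from the outer one, which is why relying on the single function-scope 'md' is the cleaner form.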

Tested-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 4143503b
drivers/md/dm-table.c +2 −30
@@ -872,8 +872,7 @@ EXPORT_SYMBOL(dm_consume_args);
 static bool __table_type_bio_based(enum dm_queue_mode table_type)
 {
 	return (table_type == DM_TYPE_BIO_BASED ||
-		table_type == DM_TYPE_DAX_BIO_BASED ||
-		table_type == DM_TYPE_NVME_BIO_BASED);
+		table_type == DM_TYPE_DAX_BIO_BASED);
 }
 
 static bool __table_type_request_based(enum dm_queue_mode table_type)
@@ -929,8 +928,6 @@ bool dm_table_supports_dax(struct dm_table *t,
 	return true;
 }
 
-static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
-
 static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
 				  sector_t start, sector_t len, void *data)
 {
@@ -960,7 +957,6 @@ static int dm_table_determine_type(struct dm_table *t)
 			goto verify_bio_based;
 		}
 		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
-		BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
 		goto verify_rq_based;
 	}
 
@@ -999,15 +995,6 @@ static int dm_table_determine_type(struct dm_table *t)
 		if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
 		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
 			t->type = DM_TYPE_DAX_BIO_BASED;
-		} else {
-			/* Check if upgrading to NVMe bio-based is valid or required */
-			tgt = dm_table_get_immutable_target(t);
-			if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
-				t->type = DM_TYPE_NVME_BIO_BASED;
-				goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
-			} else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
-				t->type = DM_TYPE_NVME_BIO_BASED;
-			}
 		}
 		return 0;
 	}
@@ -1024,8 +1011,7 @@ static int dm_table_determine_type(struct dm_table *t)
 	 * (e.g. request completion process for partial completion.)
 	 */
 	if (t->num_targets > 1) {
-		DMERR("%s DM doesn't support multiple targets",
-		      t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
+		DMERR("request-based DM doesn't support multiple targets");
 		return -EINVAL;
 	}
 
@@ -1714,20 +1700,6 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
 	return q && !blk_queue_add_random(q);
 }
 
-static int device_is_partial_completion(struct dm_target *ti, struct dm_dev *dev,
-					sector_t start, sector_t len, void *data)
-{
-	char b[BDEVNAME_SIZE];
-
-	/* For now, NVMe devices are the only devices of this class */
-	return (strncmp(bdevname(dev->bdev, b), "nvme", 4) != 0);
-}
-
-static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
-{
-	return !dm_table_any_dev_attr(t, device_is_partial_completion, NULL);
-}
-
 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
 					 sector_t start, sector_t len, void *data)
 {
drivers/md/dm.c +7 −66
@@ -1000,7 +1000,7 @@ static void clone_endio(struct bio *bio)
 	struct mapped_device *md = tio->io->md;
 	dm_endio_fn endio = tio->ti->type->end_io;
 
-	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
+	if (unlikely(error == BLK_STS_TARGET)) {
 		if (bio_op(bio) == REQ_OP_DISCARD &&
 		    !bio->bi_disk->queue->limits.max_discard_sectors)
 			disable_discard(md);
@@ -1325,7 +1325,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
 	sector = clone->bi_iter.bi_sector;
 
 	if (unlikely(swap_bios_limit(ti, clone))) {
-		struct mapped_device *md = io->md;
 		int latch = get_swap_bios();
 		if (unlikely(latch != md->swap_bios))
 			__set_swap_bios_limit(md, latch);
@@ -1340,24 +1339,17 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
 		/* the bio has been remapped so dispatch it */
 		trace_block_bio_remap(clone->bi_disk->queue, clone,
 				      bio_dev(io->orig_bio), sector);
-		if (md->type == DM_TYPE_NVME_BIO_BASED)
-			ret = direct_make_request(clone);
-		else
-			ret = generic_make_request(clone);
+		ret = generic_make_request(clone);
 		break;
 	case DM_MAPIO_KILL:
-		if (unlikely(swap_bios_limit(ti, clone))) {
-			struct mapped_device *md = io->md;
+		if (unlikely(swap_bios_limit(ti, clone)))
 			up(&md->swap_bios_semaphore);
-		}
 		free_tio(tio);
 		dec_pending(io, BLK_STS_IOERR);
 		break;
 	case DM_MAPIO_REQUEUE:
-		if (unlikely(swap_bios_limit(ti, clone))) {
-			struct mapped_device *md = io->md;
+		if (unlikely(swap_bios_limit(ti, clone)))
 			up(&md->swap_bios_semaphore);
-		}
 		free_tio(tio);
 		dec_pending(io, BLK_STS_DM_REQUEUE);
 		break;
@@ -1732,51 +1724,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
 	return ret;
 }
 
-/*
- * Optimized variant of __split_and_process_bio that leverages the
- * fact that targets that use it do _not_ have a need to split bios.
- */
-static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
-			      struct bio *bio, struct dm_target *ti)
-{
-	struct clone_info ci;
-	blk_qc_t ret = BLK_QC_T_NONE;
-	int error = 0;
-
-	init_clone_info(&ci, md, map, bio);
-
-	if (bio->bi_opf & REQ_PREFLUSH) {
-		struct bio flush_bio;
-
-		/*
-		 * Use an on-stack bio for this, it's safe since we don't
-		 * need to reference it after submit. It's just used as
-		 * the basis for the clone(s).
-		 */
-		bio_init(&flush_bio, NULL, 0);
-		flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
-		ci.bio = &flush_bio;
-		ci.sector_count = 0;
-		error = __send_empty_flush(&ci);
-		bio_uninit(ci.bio);
-		/* dec_pending submits any data associated with flush */
-	} else {
-		struct dm_target_io *tio;
-
-		ci.bio = bio;
-		ci.sector_count = bio_sectors(bio);
-		if (__process_abnormal_io(&ci, ti, &error))
-			goto out;
-
-		tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
-		ret = __clone_and_map_simple_bio(&ci, tio, NULL);
-	}
-out:
-	/* drop the extra reference count */
-	dec_pending(ci.io, errno_to_blk_status(error));
-	return ret;
-}
-
 static blk_qc_t dm_process_bio(struct mapped_device *md,
 			       struct dm_table *map, struct bio *bio)
 {
@@ -1807,8 +1754,6 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
 		/* regular IO is split by __split_and_process_bio */
 	}
 
-	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
-		return __process_bio(md, map, bio, ti);
 	return __split_and_process_bio(md, map, bio);
 }
 
@@ -2200,12 +2145,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 	if (request_based)
 		dm_stop_queue(q);
 
-	if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
+	if (request_based) {
 		/*
-		 * Leverage the fact that request-based DM targets and
-		 * NVMe bio based targets are immutable singletons
-		 * - used to optimize both dm_request_fn and dm_mq_queue_rq;
-		 *   and __process_bio.
+		 * Leverage the fact that request-based DM targets are
+		 * immutable singletons - used to optimize dm_mq_queue_rq.
 		 */
 		md->immutable_target = dm_table_get_immutable_target(t);
 	}
@@ -2334,7 +2277,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 		break;
 	case DM_TYPE_BIO_BASED:
 	case DM_TYPE_DAX_BIO_BASED:
-	case DM_TYPE_NVME_BIO_BASED:
 		dm_init_congested_fn(md);
 		break;
 	case DM_TYPE_NONE:
@@ -3070,7 +3012,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
 	switch (type) {
 	case DM_TYPE_BIO_BASED:
 	case DM_TYPE_DAX_BIO_BASED:
-	case DM_TYPE_NVME_BIO_BASED:
 		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
 		io_front_pad = roundup(front_pad,  __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
include/linux/device-mapper.h +0 −1
@@ -28,7 +28,6 @@ enum dm_queue_mode {
 	DM_TYPE_BIO_BASED	 = 1,
 	DM_TYPE_REQUEST_BASED	 = 2,
 	DM_TYPE_DAX_BIO_BASED	 = 3,
-	DM_TYPE_NVME_BIO_BASED	 = 4,
 };
 
 typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;