Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c5248f79 authored by Mike Snitzer
Browse files

dm: remove support for stacking dm-mq on .request_fn device(s)



Remove all fiddly code that propped up this support for a blk-mq
request-queue on top of all .request_fn devices.

Testing has proven this niche request-based dm-mq mode to be buggy when
testing fault tolerance with DM multipath, and there is no point trying
to preserve it.

Should help improve efficiency of pure dm-mq code and make code
maintenance less delicate.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 818c5f3b
Loading
Loading
Loading
Loading
+4 −1
Original line number Diff line number Diff line
@@ -418,7 +418,10 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
	spin_unlock_irq(&m->lock);

	if (clone) {
		/* Old request-based interface: allocated clone is passed in */
		/*
		 * Old request-based interface: allocated clone is passed in.
		 * Used by: .request_fn stacked on .request_fn path(s).
		 */
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+16 −39
Original line number Diff line number Diff line
@@ -1141,11 +1141,6 @@ static void free_rq_clone(struct request *clone)
	else if (!md->queue->mq_ops)
		/* request_fn queue stacked on request_fn queue(s) */
		free_clone_request(md, clone);
	/*
	 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
	 * no need to call free_clone_request() because we leverage blk-mq by
	 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
	 */

	if (!md->queue->mq_ops)
		free_rq_tio(tio);
@@ -1866,23 +1861,17 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
				struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	/*
	 * Do not allocate a clone if tio->clone was already set
	 * (see: dm_mq_queue_rq).
	 * Create clone for use with .request_fn request_queue
	 */
	bool alloc_clone = !tio->clone;
	struct request *clone;

	if (alloc_clone) {
	clone = alloc_clone_request(md, gfp_mask);
	if (!clone)
		return NULL;
	} else
		clone = tio->clone;

	blk_rq_init(NULL, clone);
	if (setup_clone(clone, rq, tio, gfp_mask)) {
		/* -ENOMEM */
		if (alloc_clone)
		free_clone_request(md, clone);
		return NULL;
	}
@@ -2692,15 +2681,6 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
	 */
	tio->ti = ti;

	/*
	 * Both the table and md type cannot change after initial table load
	 */
	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
		/* clone request is allocated at the end of the pdu */
		tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
		(void) clone_rq(rq, md, tio, GFP_ATOMIC);
		queue_kthread_work(&md->kworker, &tio->work);
	} else {
	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
@@ -2708,7 +2688,6 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
		rq_completed(md, rq_data_dir(rq), false);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}
	}

	return BLK_MQ_RQ_QUEUE_OK;
}
@@ -2726,6 +2705,11 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
	struct request_queue *q;
	int err;

	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
		return -EINVAL;
	}

	md->tag_set = kzalloc(sizeof(struct blk_mq_tag_set), GFP_KERNEL);
	if (!md->tag_set)
		return -ENOMEM;
@@ -2738,10 +2722,6 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	if (md_type == DM_TYPE_REQUEST_BASED) {
		/* put the memory for non-blk-mq clone at the end of the pdu */
		md->tag_set->cmd_size += sizeof(struct request);
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
@@ -2758,9 +2738,6 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
	blk_mq_register_disk(md->disk);

	if (md_type == DM_TYPE_REQUEST_BASED)
		init_rq_based_worker_thread(md);

	return 0;

out_tag_set: