
Commit 466d89a6 authored by Keith Busch, committed by Mike Snitzer

dm: prepare for allocating blk-mq clone requests in target



For blk-mq request-based DM the responsibility of allocating a cloned
request will be transferred from DM core to the target type.

To prepare for conditionally using this new model, the original
request's 'special' now points to the dm_rq_target_io because the
clone is allocated later in the block layer rather than in DM core.
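
To illustrate the change in wiring, here is a minimal sketch based only
on the fields visible in the diff below; it is not a compilable kernel
excerpt, and the two helper names are hypothetical:

    /* Before this commit: rq->special held the clone directly, and the
     * tio was reached through the clone's end_io_data. */
    static struct dm_rq_target_io *tio_from_request_old(struct request *rq)
    {
    	struct request *clone = rq->special;
    	return clone->end_io_data;
    }

    /* After this commit: rq->special points at the tio itself, so the
     * completion paths no longer require that DM core already allocated
     * a clone; the clone, once allocated, is reached via tio->clone. */
    static struct dm_rq_target_io *tio_from_request_new(struct request *rq)
    {
    	return rq->special;
    }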

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 2eb6e1e3
drivers/md/dm.c: +66 −68

--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1016,7 +1016,7 @@ static void end_clone_bio(struct bio *clone, int error)
  * the md may be freed in dm_put() at the end of this function.
  * Or do dm_get() before calling this function and dm_put() later.
  */
-static void rq_completed(struct mapped_device *md, int rw, int run_queue)
+static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 {
 	atomic_dec(&md->pending[rw]);
@@ -1050,7 +1050,8 @@ static void free_rq_clone(struct request *clone)
 
 /*
  * Complete the clone and the original request.
- * Must be called without queue lock.
+ * Must be called without clone's queue lock held,
+ * see end_clone_request() for more details.
  */
 static void dm_end_request(struct request *clone, int error)
 {
@@ -1079,7 +1080,8 @@ static void dm_end_request(struct request *clone, int error)
 
 static void dm_unprep_request(struct request *rq)
 {
-	struct request *clone = rq->special;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;
 
 	rq->special = NULL;
 	rq->cmd_flags &= ~REQ_DONTPREP;
@@ -1090,12 +1092,10 @@ static void dm_unprep_request(struct request *rq)
 /*
  * Requeue the original request of a clone.
  */
-static void dm_requeue_unmapped_request(struct request *clone)
+static void dm_requeue_unmapped_original_request(struct mapped_device *md,
+						 struct request *rq)
 {
-	int rw = rq_data_dir(clone);
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct mapped_device *md = tio->md;
-	struct request *rq = tio->orig;
+	int rw = rq_data_dir(rq);
 	struct request_queue *q = rq->q;
 	unsigned long flags;
 
@@ -1105,7 +1105,14 @@ static void dm_requeue_unmapped_request(struct request *clone)
 	blk_requeue_request(q, rq);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	rq_completed(md, rw, 0);
+	rq_completed(md, rw, false);
+}
+
+static void dm_requeue_unmapped_request(struct request *clone)
+{
+	struct dm_rq_target_io *tio = clone->end_io_data;
+
+	dm_requeue_unmapped_original_request(tio->md, tio->orig);
 }
 
 static void __stop_queue(struct request_queue *q)
@@ -1175,8 +1182,8 @@ static void dm_done(struct request *clone, int error, bool mapped)
 static void dm_softirq_done(struct request *rq)
 {
 	bool mapped = true;
-	struct request *clone = rq->completion_data;
-	struct dm_rq_target_io *tio = clone->end_io_data;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;
 
 	if (rq->cmd_flags & REQ_FAILED)
 		mapped = false;
@@ -1188,13 +1195,11 @@ static void dm_softirq_done(struct request *rq)
  * Complete the clone and the original request with the error status
  * through softirq context.
  */
-static void dm_complete_request(struct request *clone, int error)
+static void dm_complete_request(struct request *rq, int error)
 {
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct request *rq = tio->orig;
+	struct dm_rq_target_io *tio = rq->special;
 
 	tio->error = error;
-	rq->completion_data = clone;
 	blk_complete_request(rq);
 }
 
@@ -1204,20 +1209,19 @@ static void dm_complete_request(struct request *clone, int error)
  * Target's rq_end_io() function isn't called.
  * This may be used when the target's map_rq() function fails.
  */
-static void dm_kill_unmapped_request(struct request *clone, int error)
+static void dm_kill_unmapped_request(struct request *rq, int error)
 {
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct request *rq = tio->orig;
-
 	rq->cmd_flags |= REQ_FAILED;
-	dm_complete_request(clone, error);
+	dm_complete_request(rq, error);
 }
 
 /*
- * Called with the queue lock held
+ * Called with the clone's queue lock held
  */
 static void end_clone_request(struct request *clone, int error)
 {
+	struct dm_rq_target_io *tio = clone->end_io_data;
+
 	/*
 	 * For just cleaning up the information of the queue in which
 	 * the clone was dispatched.
@@ -1228,13 +1232,13 @@ static void end_clone_request(struct request *clone, int error)
 
 	/*
 	 * Actual request completion is done in a softirq context which doesn't
-	 * hold the queue lock.  Otherwise, deadlock could occur because:
+	 * hold the clone's queue lock.  Otherwise, deadlock could occur because:
 	 *     - another request may be submitted by the upper level driver
 	 *       of the stacking during the completion
 	 *     - the submission which requires queue lock may be done
-	 *       against this queue
+	 *       against this clone's queue
 	 */
-	dm_complete_request(clone, error);
+	dm_complete_request(tio->orig, error);
 }
 
 /*
@@ -1712,16 +1716,17 @@ static void dm_request(struct request_queue *q, struct bio *bio)
 		_dm_request(q, bio);
 }
 
-static void dm_dispatch_request(struct request *rq)
+static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
 {
 	int r;
 
-	if (blk_queue_io_stat(rq->q))
-		rq->cmd_flags |= REQ_IO_STAT;
+	if (blk_queue_io_stat(clone->q))
+		clone->cmd_flags |= REQ_IO_STAT;
 
-	rq->start_time = jiffies;
-	r = blk_insert_cloned_request(rq->q, rq);
+	clone->start_time = jiffies;
+	r = blk_insert_cloned_request(clone->q, clone);
 	if (r)
+		/* must complete clone in terms of original request */
 		dm_complete_request(rq, r);
 }
 
@@ -1760,7 +1765,7 @@ static int setup_clone(struct request *clone, struct request *rq,
 	return 0;
 }
 
-static struct request *__clone_rq(struct request *rq, struct mapped_device *md,
+static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 				struct dm_rq_target_io *tio, gfp_t gfp_mask)
 {
 	struct request *clone = alloc_clone_request(md, gfp_mask);
@@ -1780,10 +1785,9 @@ static struct request *__clone_rq(struct request *rq, struct mapped_device *md,
 
 static void map_tio_request(struct kthread_work *work);
 
-static struct request *clone_rq(struct request *rq, struct mapped_device *md,
-				gfp_t gfp_mask)
+static struct dm_rq_target_io *prep_tio(struct request *rq,
+					struct mapped_device *md, gfp_t gfp_mask)
 {
-	struct request *clone;
 	struct dm_rq_target_io *tio;
 
 	tio = alloc_rq_tio(md, gfp_mask);
@@ -1798,13 +1802,12 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 	memset(&tio->info, 0, sizeof(tio->info));
 	init_kthread_work(&tio->work, map_tio_request);
 
-	clone = __clone_rq(rq, md, tio, GFP_ATOMIC);
-	if (!clone) {
+	if (!clone_rq(rq, md, tio, gfp_mask)) {
 		free_rq_tio(tio);
 		return NULL;
 	}
 
-	return clone;
+	return tio;
 }
 
 /*
@@ -1813,18 +1816,18 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 static int dm_prep_fn(struct request_queue *q, struct request *rq)
 {
 	struct mapped_device *md = q->queuedata;
-	struct request *clone;
+	struct dm_rq_target_io *tio;
 
 	if (unlikely(rq->special)) {
 		DMWARN("Already has something in rq->special.");
 		return BLKPREP_KILL;
 	}
 
-	clone = clone_rq(rq, md, GFP_ATOMIC);
-	if (!clone)
+	tio = prep_tio(rq, md, GFP_ATOMIC);
+	if (!tio)
 		return BLKPREP_DEFER;
 
-	rq->special = clone;
+	rq->special = tio;
 	rq->cmd_flags |= REQ_DONTPREP;
 
 	return BLKPREP_OK;
@@ -1835,11 +1838,12 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
  * 0  : the request has been processed (not requeued)
  * !0 : the request has been requeued
  */
-static int map_request(struct dm_target *ti, struct request *clone,
+static int map_request(struct dm_target *ti, struct request *rq,
 		       struct mapped_device *md)
 {
 	int r, requeued = 0;
-	struct dm_rq_target_io *tio = clone->end_io_data;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;
 
 	r = ti->type->map_rq(ti, clone, &tio->info);
 	switch (r) {
@@ -1849,8 +1853,8 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	case DM_MAPIO_REMAPPED:
 		/* The target has remapped the I/O so dispatch it */
 		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
-				     blk_rq_pos(tio->orig));
-		dm_dispatch_request(clone);
+				     blk_rq_pos(rq));
+		dm_dispatch_clone_request(clone, rq);
 		break;
 	case DM_MAPIO_REQUEUE:
 		/* The target wants to requeue the I/O */
@@ -1864,7 +1868,7 @@ static int map_request(struct dm_target *ti, struct request *clone,
 		}
 
 		/* The target wants to complete the I/O */
-		dm_kill_unmapped_request(clone, r);
+		dm_kill_unmapped_request(rq, r);
 		break;
 	}
 
@@ -1875,16 +1879,13 @@ static void map_tio_request(struct kthread_work *work)
 {
 	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
 
-	map_request(tio->ti, tio->clone, tio->md);
+	map_request(tio->ti, tio->orig, tio->md);
 }
 
-static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+static void dm_start_request(struct mapped_device *md, struct request *orig)
 {
-	struct request *clone;
-
 	blk_start_request(orig);
-	clone = orig->special;
-	atomic_inc(&md->pending[rq_data_dir(clone)]);
+	atomic_inc(&md->pending[rq_data_dir(orig)]);
 
 	/*
 	 * Hold the md reference here for the in-flight I/O.
@@ -1894,8 +1895,6 @@ static struct request *dm_start_request(struct mapped_device *md, struct request
 	 * See the comment in rq_completed() too.
 	 */
 	dm_get(md);
-
-	return clone;
 }
 
 /*
@@ -1908,7 +1907,7 @@ static void dm_request_fn(struct request_queue *q)
 	int srcu_idx;
 	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 	struct dm_target *ti;
-	struct request *rq, *clone;
+	struct request *rq;
 	struct dm_rq_target_io *tio;
 	sector_t pos;
 
@@ -1931,19 +1930,19 @@ static void dm_request_fn(struct request_queue *q)
 		ti = dm_table_find_target(map, pos);
 		if (!dm_target_is_valid(ti)) {
 			/*
-			 * Must perform setup, that dm_done() requires,
+			 * Must perform setup, that rq_completed() requires,
 			 * before calling dm_kill_unmapped_request
 			 */
 			DMERR_LIMIT("request attempted access beyond the end of device");
-			clone = dm_start_request(md, rq);
-			dm_kill_unmapped_request(clone, -EIO);
+			dm_start_request(md, rq);
+			dm_kill_unmapped_request(rq, -EIO);
 			continue;
 		}
 
 		if (ti->type->busy && ti->type->busy(ti))
 			goto delay_and_out;
 
-		clone = dm_start_request(md, rq);
+		dm_start_request(md, rq);
 
 		tio = rq->special;
 		/* Establish tio->ti before queuing work (map_tio_request) */
@@ -2240,7 +2239,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 			bioset_free(md->bs);
 			md->bs = p->bs;
 			p->bs = NULL;
-		} else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
+		}
 		/*
 		 * There's no need to reload with request-based dm
 		 * because the size of front_pad doesn't change.
@@ -2249,7 +2248,6 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 		 * to bio from the old bioset, so you must walk
 		 * through the queue to unprep.
 		 */
-		}
 		goto out;
 	}