Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fb45f493 authored by Linus Torvalds
Browse files
Pull device mapper bugfixes from Mike Snitzer:
 "Fix two bugs in the request-based DM blk-mq support that was added
  during the 4.1 merge"

* tag 'dm-4.1-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: fix free_rq_clone() NULL pointer when requeueing unmapped request
  dm: only initialize the request_queue once
parents 9c4249c8 aa6df8dd
Loading
Loading
Loading
Loading
+9 −8
Original line number Diff line number Diff line
@@ -1298,14 +1298,9 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
		goto err_unlock_md_type;
	}

	if (dm_get_md_type(md) == DM_TYPE_NONE)
	if (dm_get_md_type(md) == DM_TYPE_NONE) {
		/* Initial table load: acquire type of table. */
		dm_set_md_type(md, dm_table_get_type(t));
	else if (dm_get_md_type(md) != dm_table_get_type(t)) {
		DMWARN("can't change device type after initial table load.");
		r = -EINVAL;
		goto err_unlock_md_type;
	}

		/* setup md->queue to reflect md's type (may block) */
		r = dm_setup_md_queue(md);
@@ -1313,6 +1308,12 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
			DMWARN("unable to set up device queue for new table.");
			goto err_unlock_md_type;
		}
	} else if (dm_get_md_type(md) != dm_table_get_type(t)) {
		DMWARN("can't change device type after initial table load.");
		r = -EINVAL;
		goto err_unlock_md_type;
	}

	dm_unlock_md_type(md);

	/* stage inactive table */
+12 −7
Original line number Diff line number Diff line
@@ -1082,18 +1082,26 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
	dm_put(md);
}

static void free_rq_clone(struct request *clone)
static void free_rq_clone(struct request *clone, bool must_be_mapped)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;

	WARN_ON_ONCE(must_be_mapped && !clone->q);

	blk_rq_unprep_clone(clone);

	if (clone->q->mq_ops)
	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
		/* stacked on blk-mq queue(s) */
		tio->ti->type->release_clone_rq(clone);
	else if (!md->queue->mq_ops)
		/* request_fn queue stacked on request_fn queue(s) */
		free_clone_request(md, clone);
	/*
	 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
	 * no need to call free_clone_request() because we leverage blk-mq by
	 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
	 */

	if (!md->queue->mq_ops)
		free_rq_tio(tio);
@@ -1124,7 +1132,7 @@ static void dm_end_request(struct request *clone, int error)
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);
	free_rq_clone(clone, true);
	if (!rq->q->mq_ops)
		blk_end_request_all(rq, error);
	else
@@ -1143,7 +1151,7 @@ static void dm_unprep_request(struct request *rq)
	}

	if (clone)
		free_rq_clone(clone);
		free_rq_clone(clone, false);
}

/*
@@ -2662,9 +2670,6 @@ static int dm_init_request_based_queue(struct mapped_device *md)
{
	struct request_queue *q = NULL;

	if (md->queue->elevator)
		return 0;

	/* Fully initialize the queue */
	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
	if (!q)