
Commit 4d6af73d authored by Christoph Hellwig, committed by Jens Axboe

block: support large requests in blk_rq_map_user_iov



This patch adds support for larger requests in blk_rq_map_user_iov by
allowing it to build multiple bios for a request.  This functionality
used to exist for the non-vectored blk_rq_map_user, and this patch
reuses the multi-bio support that has stuck around on the unmap side.
Thanks to the iov_iter API, supporting multiple bios is fairly
trivial: we just keep iterating, mapping one bio at a time, until the
whole iov_iter has been consumed.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Jeff Lien <Jeff.Lien@hgst.com>
Tested-by: Jeff Lien <Jeff.Lien@hgst.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent f2101842
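
Before the diff itself, here is a minimal userland sketch of the pattern
the patch introduces: consume a large request by mapping one bounded
chunk at a time until the iterator is empty.  It is an illustration
only; chunk_iter, map_one_chunk, and MAX_CHUNK are made-up stand-ins
for the kernel's iov_iter, __blk_rq_map_user_iov, and the per-bio size
limit, not kernel API.

/*
 * Illustrative sketch only: chunk_iter stands in for iov_iter,
 * map_one_chunk for __blk_rq_map_user_iov, and MAX_CHUNK for the
 * per-bio size limit.  None of these names are kernel API.
 */
#include <stdio.h>
#include <stddef.h>

#define MAX_CHUNK 256			/* stand-in for the per-bio cap */

struct chunk_iter {
	const char *base;		/* start of the unconsumed data */
	size_t count;			/* bytes left, like iov_iter_count() */
};

/* Map one bounded chunk and advance, like one __blk_rq_map_user_iov call. */
static int map_one_chunk(struct chunk_iter *it)
{
	size_t len = it->count < MAX_CHUNK ? it->count : MAX_CHUNK;

	printf("mapped chunk of %zu bytes\n", len);
	it->base += len;		/* the iov_iter_advance() step */
	it->count -= len;
	return 0;
}

int main(void)
{
	static char payload[1000];	/* a "request" larger than one chunk */
	struct chunk_iter it = { payload, sizeof(payload) };

	if (!it.count)			/* mirrors the up-front -EINVAL check */
		return 1;

	/* The same do/while shape as the new loop in blk_rq_map_user_iov. */
	do {
		if (map_one_chunk(&it))
			return 1;	/* the real code jumps to unmap_rq here */
	} while (it.count);

	return 0;
}

As in the patch, the do/while shape is safe because an empty input is
rejected before the loop is entered (blk_rq_map_user_iov returns
-EINVAL for a zero-count iterator).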
Loading
Loading
Loading
Loading
block/blk-map.c  +61 −30
@@ -57,6 +57,49 @@ static int __blk_rq_unmap_user(struct bio *bio)
 	return ret;
 }
 
+static int __blk_rq_map_user_iov(struct request *rq,
+		struct rq_map_data *map_data, struct iov_iter *iter,
+		gfp_t gfp_mask, bool copy)
+{
+	struct request_queue *q = rq->q;
+	struct bio *bio, *orig_bio;
+	int ret;
+
+	if (copy)
+		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
+	else
+		bio = bio_map_user_iov(q, iter, gfp_mask);
+
+	if (IS_ERR(bio))
+		return PTR_ERR(bio);
+
+	if (map_data && map_data->null_mapped)
+		bio_set_flag(bio, BIO_NULL_MAPPED);
+
+	iov_iter_advance(iter, bio->bi_iter.bi_size);
+	if (map_data)
+		map_data->offset += bio->bi_iter.bi_size;
+
+	orig_bio = bio;
+	blk_queue_bounce(q, &bio);
+
+	/*
+	 * We link the bounce buffer in and could have to traverse it
+	 * later so we have to get a ref to prevent it from being freed
+	 */
+	bio_get(bio);
+
+	ret = blk_rq_append_bio(q, rq, bio);
+	if (ret) {
+		bio_endio(bio);
+		__blk_rq_unmap_user(orig_bio);
+		bio_put(bio);
+		return ret;
+	}
+
+	return 0;
+}
+
 /**
  * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted
@@ -82,10 +125,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 			struct rq_map_data *map_data,
 			const struct iov_iter *iter, gfp_t gfp_mask)
 {
-	struct bio *bio;
-	int unaligned = 0;
-	struct iov_iter i;
 	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
+	bool copy = (q->dma_pad_mask & iter->count) || map_data;
+	struct bio *bio = NULL;
+	struct iov_iter i;
+	int ret;
 
 	if (!iter || !iter->count)
 		return -EINVAL;
@@ -101,42 +145,29 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		 */
 		if ((uaddr & queue_dma_alignment(q)) ||
 		    iovec_gap_to_prv(q, &prv, &iov))
-			unaligned = 1;
+			copy = true;
 
 		prv.iov_base = iov.iov_base;
 		prv.iov_len = iov.iov_len;
 	}
 
-	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
-		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
-	else
-		bio = bio_map_user_iov(q, iter, gfp_mask);
-
-	if (IS_ERR(bio))
-		return PTR_ERR(bio);
-
-	if (map_data && map_data->null_mapped)
-		bio_set_flag(bio, BIO_NULL_MAPPED);
-
-	if (bio->bi_iter.bi_size != iter->count) {
-		/*
-		 * Grab an extra reference to this bio, as bio_unmap_user()
-		 * expects to be able to drop it twice as it happens on the
-		 * normal IO completion path
-		 */
-		bio_get(bio);
-		bio_endio(bio);
-		__blk_rq_unmap_user(bio);
-		return -EINVAL;
-	}
+	i = *iter;
+	do {
+		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
+		if (ret)
+			goto unmap_rq;
+		if (!bio)
+			bio = rq->bio;
+	} while (iov_iter_count(&i));
 
 	if (!bio_flagged(bio, BIO_USER_MAPPED))
 		rq->cmd_flags |= REQ_COPY_USER;
 
-	blk_queue_bounce(q, &bio);
-	bio_get(bio);
-	blk_rq_bio_prep(q, rq, bio);
 	return 0;
+
+unmap_rq:
+	__blk_rq_unmap_user(bio);
+	rq->bio = NULL;
+	return -EINVAL;
 }
 EXPORT_SYMBOL(blk_rq_map_user_iov);
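
The unmap side the commit message refers to is blk_rq_unmap_user(),
which already walks a chain of bios via bi_next and unmaps each one;
that existing walk is what makes multi-bio mapping safe to tear down.
Here is a userland sketch of that walk, with made-up fake_bio and
fake_request types standing in for the kernel's structures:

/*
 * Illustrative sketch of the multi-bio unmap walk that
 * blk_rq_unmap_user() already performs (the "existing functionality
 * ... on the unmap side" the commit message mentions).  fake_bio and
 * fake_request are made-up types; next plays the role of bio->bi_next.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_bio {
	struct fake_bio *next;		/* like bio->bi_next */
	int id;
};

struct fake_request {
	struct fake_bio *bio;		/* head of the chain, like rq->bio */
};

/* Unmap and free every bio in the chain, like blk_rq_unmap_user(). */
static void unmap_request(struct fake_request *rq)
{
	struct fake_bio *b = rq->bio;

	while (b) {
		struct fake_bio *next = b->next;

		printf("unmapped bio %d\n", b->id);	/* __blk_rq_unmap_user() step */
		free(b);				/* the bio_put() step */
		b = next;
	}
	rq->bio = NULL;
}

int main(void)
{
	struct fake_request rq = { NULL };
	int n;

	/* Chain three "bios" as if three mapping iterations had succeeded. */
	for (n = 3; n >= 1; n--) {
		struct fake_bio *b = malloc(sizeof(*b));

		if (!b)
			return 1;
		b->id = n;
		b->next = rq.bio;	/* prepend, so ids come out 1, 2, 3 */
		rq.bio = b;
	}

	unmap_request(&rq);
	return 0;
}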