
Commit 0e75f906 authored by Mike Christie, committed by Jens Axboe

[PATCH] block: support larger block pc requests



This patch modifies blk_rq_map/unmap_user() and the cdrom and scsi_ioctl.c
users so that requests larger than a single bio are supported by chaining
bios together.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent ad2d7225
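
The effect is easiest to see from the caller's side. Below is a minimal,
hypothetical kernel-side sketch (not part of this commit) of issuing a
block-pc request whose payload may span several bios. It assumes the
2.6.19-era API used throughout this diff (blk_get_request(),
blk_execute_rq(), and the reworked blk_rq_map_user()/blk_rq_unmap_user()
shown below); CDB, sense-buffer and timeout setup are elided.

static int send_large_pc_request(request_queue_t *q, struct gendisk *disk,
				 void __user *ubuf, unsigned long len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* rq->cmd[], rq->sense and rq->timeout setup elided */

	/*
	 * len may now exceed BIO_MAX_SIZE: blk_rq_map_user() builds a
	 * chain of bios, bounded only by q->max_hw_sectors.
	 */
	ret = blk_rq_map_user(q, rq, ubuf, len);
	if (ret)
		goto out;

	if (blk_execute_rq(q, disk, rq, 0))
		ret = -EIO;

	/* unmapping now walks the whole chain hanging off rq->bio */
	if (blk_rq_unmap_user(rq))
		ret = -EFAULT;
out:
	blk_put_request(rq);
	return ret;
}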
block/ll_rw_blk.c  +126 −40
@@ -2322,6 +2322,84 @@ void blk_insert_request(request_queue_t *q, struct request *rq,

 EXPORT_SYMBOL(blk_insert_request);
 
+static int __blk_rq_unmap_user(struct bio *bio)
+{
+	int ret = 0;
+
+	if (bio) {
+		if (bio_flagged(bio, BIO_USER_MAPPED))
+			bio_unmap_user(bio);
+		else
+			ret = bio_uncopy_user(bio);
+	}
+
+	return ret;
+}
+
+static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+			     void __user *ubuf, unsigned int len)
+{
+	unsigned long uaddr;
+	struct bio *bio, *orig_bio;
+	int reading, ret;
+
+	reading = rq_data_dir(rq) == READ;
+
+	/*
+	 * if alignment requirement is satisfied, map in user pages for
+	 * direct dma. else, set up kernel bounce buffers
+	 */
+	uaddr = (unsigned long) ubuf;
+	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+		bio = bio_map_user(q, NULL, uaddr, len, reading);
+	else
+		bio = bio_copy_user(q, uaddr, len, reading);
+
+	if (IS_ERR(bio)) {
+		return PTR_ERR(bio);
+	}
+
+	orig_bio = bio;
+	blk_queue_bounce(q, &bio);
+	/*
+	 * We link the bounce buffer in and could have to traverse it
+	 * later so we have to get a ref to prevent it from being freed
+	 */
+	bio_get(bio);
+
+	/*
+	 * for most (all? don't know of any) queues we could
+	 * skip grabbing the queue lock here. only drivers with
+	 * funky private ->back_merge_fn() function could be
+	 * problematic.
+	 */
+	spin_lock_irq(q->queue_lock);
+	if (!rq->bio)
+		blk_rq_bio_prep(q, rq, bio);
+	else if (!q->back_merge_fn(q, rq, bio)) {
+		ret = -EINVAL;
+		spin_unlock_irq(q->queue_lock);
+		goto unmap_bio;
+	} else {
+		rq->biotail->bi_next = bio;
+		rq->biotail = bio;
+
+		rq->nr_sectors += bio_sectors(bio);
+		rq->hard_nr_sectors = rq->nr_sectors;
+		rq->data_len += bio->bi_size;
+	}
+	spin_unlock_irq(q->queue_lock);
+
+	return bio->bi_size;
+
+unmap_bio:
+	/* if it was bounced we must call the end io function */
+	bio_endio(bio, bio->bi_size, 0);
+	__blk_rq_unmap_user(orig_bio);
+	bio_put(bio);
+	return ret;
+}
+
 /**
  * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
  * @q:		request queue where request should be inserted
@@ -2343,42 +2421,44 @@ EXPORT_SYMBOL(blk_insert_request);
  *    unmapping.
  */
 int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
-		    unsigned int len)
+		    unsigned long len)
 {
-	unsigned long uaddr;
-	struct bio *bio;
-	int reading;
+	unsigned long bytes_read = 0;
+	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len || !ubuf)
 		return -EINVAL;
 
-	reading = rq_data_dir(rq) == READ;
+	while (bytes_read != len) {
+		unsigned long map_len, end, start;
 
-	/*
-	 * if alignment requirement is satisfied, map in user pages for
-	 * direct dma. else, set up kernel bounce buffers
-	 */
-	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-		bio = bio_map_user(q, NULL, uaddr, len, reading);
-	else
-		bio = bio_copy_user(q, uaddr, len, reading);
+		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
+		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
+								>> PAGE_SHIFT;
+		start = (unsigned long)ubuf >> PAGE_SHIFT;
 
-	if (!IS_ERR(bio)) {
-		rq->bio = rq->biotail = bio;
-		blk_rq_bio_prep(q, rq, bio);
+		/*
+		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
+		 * pages. If this happens we just lower the requested
+		 * mapping len by a page so that we can fit
+		 */
+		if (end - start > BIO_MAX_PAGES)
+			map_len -= PAGE_SIZE;
 
-		rq->buffer = rq->data = NULL;
-		rq->data_len = len;
-		return 0;
+		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+		if (ret < 0)
+			goto unmap_rq;
+		bytes_read += ret;
+		ubuf += ret;
 	}
 
-	/*
-	 * bio is the err-ptr
-	 */
-	return PTR_ERR(bio);
+	rq->buffer = rq->data = NULL;
+	return 0;
+unmap_rq:
+	blk_rq_unmap_user(rq);
+	return ret;
 }
 
 EXPORT_SYMBOL(blk_rq_map_user);
@@ -2404,7 +2484,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
  *    unmapping.
  */
 int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
-			struct sg_iovec *iov, int iov_count)
+			struct sg_iovec *iov, int iov_count, unsigned int len)
 {
 	struct bio *bio;
 
@@ -2418,10 +2498,15 @@ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
-	rq->bio = rq->biotail = bio;
+	if (bio->bi_size != len) {
+		bio_endio(bio, bio->bi_size, 0);
+		bio_unmap_user(bio);
+		return -EINVAL;
+	}
+
+	bio_get(bio);
 	blk_rq_bio_prep(q, rq, bio);
 	rq->buffer = rq->data = NULL;
-	rq->data_len = bio->bi_size;
 	return 0;
 }

@@ -2429,23 +2514,26 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);

 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @bio:	bio to be unmapped
- * @ulen:	length of user buffer
+ * @rq:		rq to be unmapped
  *
  * Description:
- *    Unmap a bio previously mapped by blk_rq_map_user().
+ *    Unmap a rq previously mapped by blk_rq_map_user().
+ *    rq->bio must be set to the original head of the request.
  */
-int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct request *rq)
 {
-	int ret = 0;
+	struct bio *bio, *mapped_bio;
 
-	if (bio) {
-		if (bio_flagged(bio, BIO_USER_MAPPED))
-			bio_unmap_user(bio);
+	while ((bio = rq->bio)) {
+		if (bio_flagged(bio, BIO_BOUNCED))
+			mapped_bio = bio->bi_private;
 		else
-			ret = bio_uncopy_user(bio);
-	}
+			mapped_bio = bio;
 
-	return ret;
+		__blk_rq_unmap_user(mapped_bio);
+		rq->bio = bio->bi_next;
+		bio_put(bio);
+	}
+	return 0;
 }

@@ -2476,11 +2564,8 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
 	if (rq_data_dir(rq) == WRITE)
 		bio->bi_rw |= (1 << BIO_RW);
 
-	rq->bio = rq->biotail = bio;
 	blk_rq_bio_prep(q, rq, bio);
-
 	rq->buffer = rq->data = NULL;
-	rq->data_len = len;
 	return 0;
 }

@@ -3495,6 +3580,7 @@ void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
 	rq->hard_cur_sectors = rq->current_nr_sectors;
 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
 	rq->buffer = bio_data(bio);
+	rq->data_len = bio->bi_size;
 
 	rq->bio = rq->biotail = bio;
 }
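
One subtlety in blk_rq_map_user() above is worth a worked example: the
BIO_MAX_PAGES + 1 check. Assuming 4 KiB pages (so BIO_MAX_PAGES = 256 and
BIO_MAX_SIZE = 1 MiB) and a hypothetical, non-page-aligned user address,
a maximally-sized mapping can straddle one page more than a single bio
may hold:

	ubuf        = 0x10000200	/* misaligned by 0x200 bytes */
	map_len     = BIO_MAX_SIZE = 0x100000
	start       = ubuf >> PAGE_SHIFT = 0x10000
	end         = (ubuf + map_len + PAGE_SIZE - 1) >> PAGE_SHIFT = 0x10101
	end - start = 0x101 = 257 > BIO_MAX_PAGES

In that case map_len is trimmed by one page and the remainder is picked
up by the next bio in the chain on a later loop iteration.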

block/scsi_ioctl.c  +26 −27
@@ -226,7 +226,6 @@ static int sg_io(struct file *file, request_queue_t *q,
 	unsigned long start_time;
 	int writing = 0, ret = 0;
 	struct request *rq;
-	struct bio *bio;
 	char sense[SCSI_SENSE_BUFFERSIZE];
 	unsigned char cmd[BLK_MAX_CDB];
 
@@ -258,30 +257,6 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (!rq)
 		return -ENOMEM;
 
-	if (hdr->iovec_count) {
-		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
-		struct sg_iovec *iov;
-
-		iov = kmalloc(size, GFP_KERNEL);
-		if (!iov) {
-			ret = -ENOMEM;
-			goto out;
-		}
-
-		if (copy_from_user(iov, hdr->dxferp, size)) {
-			kfree(iov);
-			ret = -EFAULT;
-			goto out;
-		}
-
-		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
-		kfree(iov);
-	} else if (hdr->dxfer_len)
-		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
-
-	if (ret)
-		goto out;
-
 	/*
 	 * fill in request structure
 	 */
@@ -294,7 +269,6 @@ static int sg_io(struct file *file, request_queue_t *q,
 	rq->sense_len = 0;
 
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	bio = rq->bio;
 
 	/*
 	 * bounce this after holding a reference to the original bio, it's
@@ -309,6 +283,31 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (!rq->timeout)
 		rq->timeout = BLK_DEFAULT_TIMEOUT;
 
+	if (hdr->iovec_count) {
+		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
+		struct sg_iovec *iov;
+
+		iov = kmalloc(size, GFP_KERNEL);
+		if (!iov) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		if (copy_from_user(iov, hdr->dxferp, size)) {
+			kfree(iov);
+			ret = -EFAULT;
+			goto out;
+		}
+
+		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
+					  hdr->dxfer_len);
+		kfree(iov);
+	} else if (hdr->dxfer_len)
+		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
+
+	if (ret)
+		goto out;
+
 	rq->retries = 0;
 
 	start_time = jiffies;
@@ -339,7 +338,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 			hdr->sb_len_wr = len;
 	}
 
-	if (blk_rq_unmap_user(bio, hdr->dxfer_len))
+	if (blk_rq_unmap_user(rq))
 		ret = -EFAULT;
 
 	/* may not have succeeded, but output values written to control
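
From userspace the change is visible through SG_IO: a transfer that
previously had to fit in a single bio can now be issued in one ioctl, up
to the queue's max_hw_sectors limit. A minimal, hypothetical sketch of
such a call (a READ(10) into a large buffer; device setup, buffer
allocation and error handling elided):

#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

/* issue a READ(10) for len bytes; len must be a multiple of 512 */
static int read10_large(int fd, void *buf, unsigned int len, unsigned int lba)
{
	unsigned char cdb[10] = { 0x28, };	/* READ(10) */
	unsigned char sense[32];
	unsigned int nblocks = len / 512;
	struct sg_io_hdr hdr;

	cdb[2] = lba >> 24; cdb[3] = lba >> 16;
	cdb[4] = lba >> 8;  cdb[5] = lba;
	cdb[7] = nblocks >> 8; cdb[8] = nblocks;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmdp = cdb;
	hdr.cmd_len = sizeof(cdb);
	hdr.dxferp = buf;
	hdr.dxfer_len = len;	/* may now be mapped as several chained bios */
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 60000;	/* milliseconds */

	return ioctl(fd, SG_IO, &hdr);
}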

drivers/cdrom/cdrom.c  +2 −4
@@ -2133,16 +2133,14 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 		rq->timeout = 60 * HZ;
 		bio = rq->bio;
 
-		if (rq->bio)
-			blk_queue_bounce(q, &rq->bio);
-
 		if (blk_execute_rq(q, cdi->disk, rq, 0)) {
 			struct request_sense *s = rq->sense;
 			ret = -EIO;
 			cdi->last_sense = s->sense_key;
 		}
 
-		if (blk_rq_unmap_user(bio, len))
+		rq->bio = bio;
+		if (blk_rq_unmap_user(rq))
 			ret = -EFAULT;
 
 		if (ret)
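
The cdrom change above illustrates the rule now documented for
blk_rq_unmap_user(): rq->bio must again point at the original head of
the mapped chain when unmapping, because request completion consumes
rq->bio as segments finish. A minimal, hypothetical sketch of that
save/restore pattern (2.6.19-era API assumed, names invented here):

static int execute_and_unmap(request_queue_t *q, struct gendisk *disk,
			     struct request *rq)
{
	struct bio *bio = rq->bio;	/* remember the mapped chain head */
	int ret = 0;

	if (blk_execute_rq(q, disk, rq, 0))
		ret = -EIO;

	rq->bio = bio;			/* restore the head before unmapping */
	if (blk_rq_unmap_user(rq))
		ret = -EFAULT;

	return ret;
}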

fs/bio.c  +2 −16
@@ -560,10 +560,8 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
 			break;
 		}
 
-		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
-			ret = -EINVAL;
+		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
 			break;
-		}
 
 		len -= bytes;
 	}
@@ -750,7 +748,6 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
 			     int write_to_vm)
 {
 	struct bio *bio;
-	int len = 0, i;
 
 	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
 
@@ -765,18 +762,7 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
 	 */
 	bio_get(bio);
 
-	for (i = 0; i < iov_count; i++)
-		len += iov[i].iov_len;
-
-	if (bio->bi_size == len)
-		return bio;
-
-	/*
-	 * don't support partial mappings
-	 */
-	bio_endio(bio, bio->bi_size, 0);
-	bio_unmap_user(bio);
-	return ERR_PTR(-EINVAL);
+	return bio;
 }
 
 static void __bio_unmap_user(struct bio *bio)

include/linux/blkdev.h  +4 −3
@@ -678,10 +678,11 @@ extern void __blk_stop_queue(request_queue_t *q);
 extern void blk_run_queue(request_queue_t *);
 extern void blk_start_queueing(request_queue_t *);
 extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
-extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
-extern int blk_rq_unmap_user(struct bio *, unsigned int);
+extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
+extern int blk_rq_unmap_user(struct request *);
 extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
+extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
+			       struct sg_iovec *, int, unsigned int);
 extern int blk_execute_rq(request_queue_t *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
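
Since blk_rq_map_user_iov() now takes the expected total length and the
mapping fails with -EINVAL unless the resulting bio size matches it
exactly, a caller that only has an iovec must sum the lengths first,
just as sg_io() passes hdr->dxfer_len above. A hypothetical kernel-side
helper sketching that (the name is invented here):

static int map_user_iov_checked(request_queue_t *q, struct request *rq,
				struct sg_iovec *iov, int iov_count)
{
	unsigned int len = 0;
	int i;

	for (i = 0; i < iov_count; i++)
		len += iov[i].iov_len;

	/* fails unless the mapped size matches len exactly */
	return blk_rq_map_user_iov(q, rq, iov, iov_count, len);
}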