
Commit 46040967 authored by Linus Torvalds
Merge branch 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block

* 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block:
  [PATCH] block: document io scheduler allow_merge_fn hook
  [PATCH] cfq-iosched: don't allow sync merges across queues
  [PATCH] Fixup blk_rq_unmap_user() API
  [PATCH] __blk_rq_unmap_user() fails to return error
  [PATCH] __blk_rq_map_user() doesn't need to grab the queue_lock
  [PATCH] Remove queue merging hooks
  [PATCH] ->nr_sectors and ->hard_nr_sectors are not used for BLOCK_PC requests
  [PATCH] cciss: fix XFER_READ/XFER_WRITE in do_cciss_request
  [PATCH] cciss: set default raid level when reading geometry fails
parents 8df8bb4a 126ec9a6
Documentation/block/biodoc.txt +7 −0
@@ -946,6 +946,13 @@ elevator_merged_fn called when a request in the scheduler has been
 				scheduler for example, to reposition the request
 				if its sorting order has changed.
 
+elevator_allow_merge_fn		called whenever the block layer determines
+				that a bio can be merged into an existing
+				request safely. The io scheduler may still
+				want to stop a merge at this point if it
+				results in some sort of conflict internally,
+				this hook allows it to do that.
+
 elevator_dispatch_fn		fills the dispatch queue with ready requests.
 				I/O schedulers are free to postpone requests by
 				not filling the dispatch queue unless @force
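The hook documented above has the same signature that cfq implements below. For orientation, a minimal scheduler-side sketch, assuming the same 2.6-era elevator API; the function name and its always-allow policy are invented for illustration:

static int example_allow_merge(request_queue_t *q, struct request *rq,
			       struct bio *bio)
{
	/*
	 * Illustration only: a real scheduler would consult its private
	 * state via q->elevator->elevator_data and return 0 to veto a
	 * merge the block layer has already deemed safe.
	 */
	return 1;	/* 1 = allow the merge, 0 = reject it */
}

It would be wired up through struct elevator_ops, exactly as the cfq diff below registers cfq_allow_merge:

	.elevator_allow_merge_fn =	example_allow_merge,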
block/cfq-iosched.c +33 −0
@@ -568,6 +568,38 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
 	cfq_remove_request(next);
 }
 
+static int cfq_allow_merge(request_queue_t *q, struct request *rq,
+			   struct bio *bio)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	const int rw = bio_data_dir(bio);
+	struct cfq_queue *cfqq;
+	pid_t key;
+
+	/*
+	 * If bio is async or a write, always allow merge
+	 */
+	if (!bio_sync(bio) || rw == WRITE)
+		return 1;
+
+	/*
+	 * bio is sync. if request is not, disallow.
+	 */
+	if (!rq_is_sync(rq))
+		return 0;
+
+	/*
+	 * Ok, both bio and request are sync. Allow merge if they are
+	 * from the same queue.
+	 */
+	key = cfq_queue_pid(current, rw, 1);
+	cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
+	if (cfqq != RQ_CFQQ(rq))
+		return 0;
+
+	return 1;
+}
+
 static inline void
 __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
@@ -2125,6 +2157,7 @@ static struct elevator_type iosched_cfq = {
 		.elevator_merge_fn = 		cfq_merge,
 		.elevator_merged_fn =		cfq_merged_request,
 		.elevator_merge_req_fn =	cfq_merged_requests,
+		.elevator_allow_merge_fn =	cfq_allow_merge,
 		.elevator_dispatch_fn =		cfq_dispatch_requests,
 		.elevator_add_req_fn =		cfq_insert_request,
 		.elevator_activate_req_fn =	cfq_activate_request,
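As a sanity check on the policy above, the decision order of cfq_allow_merge can be distilled into plain userspace C. Illustration only: the integer owner field stands in for the cfq_queue lookup done via cfq_queue_pid() and cfq_find_cfq_hash(), and writes are folded into the async case.

#include <assert.h>
#include <stdbool.h>

struct io { bool sync; int owner; };	/* owner models the per-process cfqq */

static bool allow_merge(struct io bio, struct io rq)
{
	if (!bio.sync)			/* async bio (or a write): always allow */
		return true;
	if (!rq.sync)			/* sync bio into a non-sync rq: never */
		return false;
	return bio.owner == rq.owner;	/* both sync: same-queue merges only */
}

int main(void)
{
	assert(allow_merge((struct io){ false, 1 }, (struct io){ true, 2 }));
	assert(!allow_merge((struct io){ true, 1 }, (struct io){ false, 1 }));
	assert(!allow_merge((struct io){ true, 1 }, (struct io){ true, 2 }));	/* the case this commit closes */
	assert(allow_merge((struct io){ true, 1 }, (struct io){ true, 1 }));
	return 0;
}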
block/elevator.c +22 −4
@@ -50,6 +50,21 @@ static const int elv_hash_shift = 6;
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
 #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
+/*
+ * Query io scheduler to see if the current process issuing bio may be
+ * merged with rq.
+ */
+static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
+{
+	request_queue_t *q = rq->q;
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_allow_merge_fn)
+		return e->ops->elevator_allow_merge_fn(q, rq, bio);
+
+	return 1;
+}
+
 /*
  * can we safely merge with this request?
  */
@@ -65,12 +80,15 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 		return 0;
 
 	/*
-	 * same device and no special stuff set, merge is ok
+	 * must be same device and not a special request
 	 */
-	if (rq->rq_disk == bio->bi_bdev->bd_disk && !rq->special)
-		return 1;
+	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+		return 0;
 
-	return 0;
+	if (!elv_iosched_allow_merge(rq, bio))
+		return 0;
+
+	return 1;
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);

block/ll_rw_blk.c +31 −36
@@ -1405,8 +1405,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 	return 1;
 }
 
-static int ll_back_merge_fn(request_queue_t *q, struct request *req, 
-			    struct bio *bio)
+int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
 {
 	unsigned short max_sectors;
 	int len;
@@ -1442,6 +1441,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 
 	return ll_new_hw_segment(q, req, bio);
 }
+EXPORT_SYMBOL(ll_back_merge_fn);
 
 static int ll_front_merge_fn(request_queue_t *q, struct request *req, 
 			     struct bio *bio)
@@ -1912,9 +1912,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	}
 
 	q->request_fn		= rfn;
-	q->back_merge_fn       	= ll_back_merge_fn;
-	q->front_merge_fn      	= ll_front_merge_fn;
-	q->merge_requests_fn	= ll_merge_requests_fn;
 	q->prep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
@@ -2350,40 +2347,29 @@ static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
 
-	if (IS_ERR(bio)) {
+	if (IS_ERR(bio))
 		return PTR_ERR(bio);
-	}
 
 	orig_bio = bio;
 	blk_queue_bounce(q, &bio);
 
 	/*
 	 * We link the bounce buffer in and could have to traverse it
 	 * later so we have to get a ref to prevent it from being freed
 	 */
 	bio_get(bio);
 
-	/*
-	 * for most (all? don't know of any) queues we could
-	 * skip grabbing the queue lock here. only drivers with
-	 * funky private ->back_merge_fn() function could be
-	 * problematic.
-	 */
-	spin_lock_irq(q->queue_lock);
 	if (!rq->bio)
 		blk_rq_bio_prep(q, rq, bio);
-	else if (!q->back_merge_fn(q, rq, bio)) {
+	else if (!ll_back_merge_fn(q, rq, bio)) {
 		ret = -EINVAL;
-		spin_unlock_irq(q->queue_lock);
 		goto unmap_bio;
 	} else {
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
-		rq->nr_sectors += bio_sectors(bio);
-		rq->hard_nr_sectors = rq->nr_sectors;
 		rq->data_len += bio->bi_size;
 	}
-	spin_unlock_irq(q->queue_lock);
 
 	return bio->bi_size;
@@ -2419,6 +2405,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		    unsigned long len)
 {
 	unsigned long bytes_read = 0;
+	struct bio *bio = NULL;
 	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
@@ -2445,6 +2432,8 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
 		if (ret < 0)
 			goto unmap_rq;
+		if (!bio)
+			bio = rq->bio;
 		bytes_read += ret;
 		ubuf += ret;
 	}
@@ -2452,7 +2441,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 	rq->buffer = rq->data = NULL;
 	return 0;
 unmap_rq:
-	blk_rq_unmap_user(rq);
+	blk_rq_unmap_user(bio);
 	return ret;
 }
 
@@ -2509,27 +2498,33 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);

 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @rq:		rq to be unmapped
+ * @bio:	       start of bio list
  *
  * Description:
- *    Unmap a rq previously mapped by blk_rq_map_user().
- *    rq->bio must be set to the original head of the request.
+ *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
+ *    supply the original rq->bio from the blk_rq_map_user() return, since
+ *    the io completion may have changed rq->bio.
  */
-int blk_rq_unmap_user(struct request *rq)
+int blk_rq_unmap_user(struct bio *bio)
 {
-	struct bio *bio, *mapped_bio;
+	struct bio *mapped_bio;
+	int ret = 0, ret2;
 
-	while ((bio = rq->bio)) {
-		if (bio_flagged(bio, BIO_BOUNCED))
-			mapped_bio = bio->bi_private;
-		else
-			mapped_bio = bio;
+	while (bio) {
+		mapped_bio = bio;
+		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
+			mapped_bio = bio->bi_private;
 
-		__blk_rq_unmap_user(mapped_bio);
-		rq->bio = bio->bi_next;
-		bio_put(bio);
+		ret2 = __blk_rq_unmap_user(mapped_bio);
+		if (ret2 && !ret)
+			ret = ret2;
+
+		mapped_bio = bio;
+		bio = bio->bi_next;
+		bio_put(mapped_bio);
 	}
-	return 0;
+
+	return ret;
 }
 
 EXPORT_SYMBOL(blk_rq_unmap_user);
@@ -2822,7 +2817,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
 	 * will have updated segment counts, update sector
 	 * counts here.
 	 */
-	if (!q->merge_requests_fn(q, req, next))
+	if (!ll_merge_requests_fn(q, req, next))
 		return 0;
 
 	/*
@@ -2939,7 +2934,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_BACK_MERGE:
 			BUG_ON(!rq_mergeable(req));
 
-			if (!q->back_merge_fn(q, req, bio))
+			if (!ll_back_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
@@ -2956,7 +2951,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_FRONT_MERGE:
 			BUG_ON(!rq_mergeable(req));
 
-			if (!q->front_merge_fn(q, req, bio))
+			if (!ll_front_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
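The caller-visible half of the blk_rq_unmap_user() fixup appears in the scsi_ioctl.c hunk below: the original bio chain must be captured right after mapping and passed to the unmap. A condensed sketch of the new convention, loosely modeled on sg_io() and assuming the 2.6.20-era helpers blk_get_request() and blk_execute_rq(); q, bd_disk, ubuf and len are taken as given, and error handling is trimmed:

	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);
	struct bio *bio;
	int err;

	err = blk_rq_map_user(q, rq, ubuf, len);
	if (err)
		goto out;
	bio = rq->bio;			/* save the chain head now... */

	blk_execute_rq(q, bd_disk, rq, 0);

	if (blk_rq_unmap_user(bio))	/* ...completion may have advanced rq->bio */
		err = -EFAULT;
out:
	blk_put_request(rq);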
block/scsi_ioctl.c +1 −2
@@ -333,8 +333,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 			hdr->sb_len_wr = len;
 	}
 
-	rq->bio = bio;
-	if (blk_rq_unmap_user(rq))
+	if (blk_rq_unmap_user(bio))
 		ret = -EFAULT;
 
 	/* may not have succeeded, but output values written to control