
Commit f31dc1cd authored by Martin K. Petersen, committed by Jens Axboe

block: Consolidate command flag and queue limit checks for merges



 - blk_check_merge_flags() verifies that cmd_flags / bi_rw are
   compatible. This function is called for both req-req and req-bio
   merging.

 - blk_rq_get_max_sectors() and blk_queue_get_max_sectors() can be used
   to query the maximum sector count for a given request or queue. The
   calls will return the right value from the queue limits given the
   type of command (RW, discard, write same, etc.). Both checks are
   modelled in the standalone sketches further down this page.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e2a60da7
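
As a reading aid, not part of the patch itself: the flag-compatibility rule that blk_check_merge_flags() introduces can be modelled in a few lines of standalone C. The MODEL_* constants and model_check_merge_flags() below are made-up stand-ins for the kernel's REQ_DISCARD / REQ_SECURE bits and the real helper, chosen only for illustration.

/*
 * Standalone model of the merge flag check (not kernel code). The MODEL_*
 * values are hypothetical stand-ins for REQ_DISCARD and REQ_SECURE.
 */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_REQ_DISCARD (1u << 0)	/* stand-in for REQ_DISCARD */
#define MODEL_REQ_SECURE  (1u << 1)	/* stand-in for REQ_SECURE */

/* Mirrors the rule in blk_check_merge_flags(): two requests (or a request
 * and a bio) may only merge if they agree on DISCARD and SECURE. */
static bool model_check_merge_flags(unsigned int flags1, unsigned int flags2)
{
	if ((flags1 & MODEL_REQ_DISCARD) != (flags2 & MODEL_REQ_DISCARD))
		return false;

	if ((flags1 & MODEL_REQ_SECURE) != (flags2 & MODEL_REQ_SECURE))
		return false;

	return true;
}

int main(void)
{
	/* A plain write and a discard must not merge. */
	printf("write + discard -> %d\n",
	       model_check_merge_flags(0, MODEL_REQ_DISCARD));

	/* Two secure discards are compatible with each other. */
	printf("secure + secure -> %d\n",
	       model_check_merge_flags(MODEL_REQ_DISCARD | MODEL_REQ_SECURE,
				       MODEL_REQ_DISCARD | MODEL_REQ_SECURE));
	return 0;
}

The rule this encodes is simply that a discard (and in particular a secure discard) is never merged with a request of a different type, even if the two happen to be contiguous on disk.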
+1 −2
@@ -1866,8 +1866,7 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 	if (!rq_mergeable(rq))
 		return 0;
 
-	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
-	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
+	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
 		return -EIO;
 	}
+12 −18
@@ -275,14 +275,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		     struct bio *bio)
 {
-	unsigned short max_sectors;
-
-	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
-		max_sectors = queue_max_hw_sectors(q);
-	else
-		max_sectors = queue_max_sectors(q);
-
-	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
+	if (blk_rq_sectors(req) + bio_sectors(bio) >
+	    blk_rq_get_max_sectors(req)) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -299,15 +293,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio)
 {
-	unsigned short max_sectors;
-
-	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
-		max_sectors = queue_max_hw_sectors(q);
-	else
-		max_sectors = queue_max_sectors(q);
-
-
-	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
+	if (blk_rq_sectors(req) + bio_sectors(bio) >
+	    blk_rq_get_max_sectors(req)) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -338,7 +325,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	/*
 	 * Will it become too large?
 	 */
-	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
+	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
+	    blk_rq_get_max_sectors(req))
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -417,6 +405,9 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	if (!rq_mergeable(req) || !rq_mergeable(next))
 		return 0;
 
+	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
+		return 0;
+
 	/*
 	 * not contiguous
 	 */
@@ -512,6 +503,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (!rq_mergeable(rq) || !bio_mergeable(bio))
 		return false;
 
+	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
+		return false;
+
 	/* different data direction or already started, don't merge */
 	if (bio_data_dir(bio) != rq_data_dir(rq))
 		return false;
+31 −0
@@ -605,6 +605,18 @@ static inline bool rq_mergeable(struct request *rq)
 	return true;
 }
 
+static inline bool blk_check_merge_flags(unsigned int flags1,
+					 unsigned int flags2)
+{
+	if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
+		return false;
+
+	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
+		return false;
+
+	return true;
+}
+
 /*
  * q->prep_rq_fn return values
  */
@@ -800,6 +812,25 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> 9;
 }
 
+static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
+						     unsigned int cmd_flags)
+{
+	if (unlikely(cmd_flags & REQ_DISCARD))
+		return q->limits.max_discard_sectors;
+
+	return q->limits.max_sectors;
+}
+
+static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
+{
+	struct request_queue *q = rq->q;
+
+	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
+		return q->limits.max_hw_sectors;
+
+	return blk_queue_get_max_sectors(q, rq->cmd_flags);
+}
+
 /*
  * Request issue related functions.
  */
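
To make the limit selection performed by the two new helpers above easier to follow, here is a standalone sketch that mirrors their behaviour outside the kernel. The struct model_limits, the MODEL_REQ_DISCARD flag and the model_* functions are simplified stand-ins invented for this example; only the decision order (BLOCK_PC uses the hardware limit, discards use max_discard_sectors, everything else uses max_sectors) is taken from the patch.

/*
 * Standalone model (not kernel code) of blk_queue_get_max_sectors() and
 * blk_rq_get_max_sectors(). Fields and flags are simplified stand-ins for
 * the real queue_limits and request fields.
 */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_REQ_DISCARD (1u << 0)	/* stand-in for REQ_DISCARD */

struct model_limits {
	unsigned int max_sectors;	  /* cap for regular read/write */
	unsigned int max_hw_sectors;	  /* hardware cap, used for BLOCK_PC */
	unsigned int max_discard_sectors; /* cap for discard requests */
};

/* Queue-level selection: discards are capped by max_discard_sectors,
 * everything else by max_sectors. */
static unsigned int model_queue_get_max_sectors(const struct model_limits *l,
						unsigned int cmd_flags)
{
	if (cmd_flags & MODEL_REQ_DISCARD)
		return l->max_discard_sectors;

	return l->max_sectors;
}

/* Request-level selection: pass-through (BLOCK_PC) requests are bounded only
 * by the hardware limit; other requests defer to the queue-level rule. */
static unsigned int model_rq_get_max_sectors(const struct model_limits *l,
					     bool is_block_pc,
					     unsigned int cmd_flags)
{
	if (is_block_pc)
		return l->max_hw_sectors;

	return model_queue_get_max_sectors(l, cmd_flags);
}

int main(void)
{
	struct model_limits l = {
		.max_sectors = 1024,
		.max_hw_sectors = 4096,
		.max_discard_sectors = 8192,
	};

	printf("read/write cap: %u\n", model_rq_get_max_sectors(&l, false, 0));
	printf("discard cap:    %u\n",
	       model_rq_get_max_sectors(&l, false, MODEL_REQ_DISCARD));
	printf("BLOCK_PC cap:   %u\n", model_rq_get_max_sectors(&l, true, 0));
	return 0;
}

Compiled and run, the program prints three different caps, showing that each command type is now checked against its own limit rather than the generic max_sectors.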