Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ae03bf63 authored by Martin K. Petersen, committed by Jens Axboe
Browse files

block: Use accessor functions for queue limits



Convert all external users of queue limits to using wrapper functions
instead of poking the request queue variables directly.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent e1defc4f
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -388,10 +388,10 @@ int blkdev_issue_discard(struct block_device *bdev,

		bio->bi_sector = sector;

		if (nr_sects > q->max_hw_sectors) {
			bio->bi_size = q->max_hw_sectors << 9;
			nr_sects -= q->max_hw_sectors;
			sector += q->max_hw_sectors;
		if (nr_sects > queue_max_hw_sectors(q)) {
			bio->bi_size = queue_max_hw_sectors(q) << 9;
			nr_sects -= queue_max_hw_sectors(q);
			sector += queue_max_hw_sectors(q);
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
+8 −8
Original line number Diff line number Diff line
@@ -1437,11 +1437,11 @@ static inline void __generic_make_request(struct bio *bio)
			goto end_io;
		}

		if (unlikely(nr_sectors > q->max_hw_sectors)) {
		if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
			       bdevname(bio->bi_bdev, b),
			       bio_sectors(bio),
				q->max_hw_sectors);
			       queue_max_hw_sectors(q));
			goto end_io;
		}

@@ -1608,8 +1608,8 @@ EXPORT_SYMBOL(submit_bio);
 */
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
	if (blk_rq_sectors(rq) > q->max_sectors ||
	    blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
		printk(KERN_ERR "%s: over max size limit.\n", __func__);
		return -EIO;
	}
@@ -1621,8 +1621,8 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
	 * limitation.
	 */
	blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > q->max_phys_segments ||
	    rq->nr_phys_segments > q->max_hw_segments) {
	if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
	    rq->nr_phys_segments > queue_max_hw_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
		return -EIO;
	}
+2 −2
Original line number Diff line number Diff line
@@ -115,7 +115,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;
@@ -292,7 +292,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
	struct bio *bio;
	int ret;

	if (len > (q->max_hw_sectors << 9))
	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;
+14 −13
Original line number Diff line number Diff line
@@ -32,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
			 * never considered part of another segment, since that
			 * might change with the bounce page.
			 */
			high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len > q->max_segment_size)
				if (seg_size + bv->bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
@@ -91,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    q->max_segment_size)
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
@@ -134,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		int nbytes = bvec->bv_len;

		if (bvprv && cluster) {
			if (sg->length + nbytes > q->max_segment_size)
			if (sg->length + nbytes > queue_max_segment_size(q))
				goto new_segment;

			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -205,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
	if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
	    req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
@@ -227,9 +228,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
	unsigned short max_sectors;

	if (unlikely(blk_pc_request(req)))
		max_sectors = q->max_hw_sectors;
		max_sectors = queue_max_hw_sectors(q);
	else
		max_sectors = q->max_sectors;
		max_sectors = queue_max_sectors(q);

	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
@@ -251,9 +252,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
	unsigned short max_sectors;

	if (unlikely(blk_pc_request(req)))
		max_sectors = q->max_hw_sectors;
		max_sectors = queue_max_hw_sectors(q);
	else
		max_sectors = q->max_sectors;
		max_sectors = queue_max_sectors(q);


	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
@@ -287,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -299,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
		total_phys_segments--;
	}

	if (total_phys_segments > q->max_phys_segments)
	if (total_phys_segments > queue_max_phys_segments(q))
		return 0;

	if (total_phys_segments > q->max_hw_segments)
	if (total_phys_segments > queue_max_hw_segments(q))
		return 0;

	/* Merge is OK... */
+12 −3
Original line number Diff line number Diff line
@@ -219,6 +219,15 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
}
EXPORT_SYMBOL(blk_queue_max_sectors);

/**
 * blk_queue_max_hw_sectors - set the hardware sector limit for the queue
 * @q:  the request queue for the device
 * @max_sectors:  limit (in 512-byte sectors) reported by the driver
 *
 * Stores the larger of @max_sectors and BLK_DEF_MAX_SECTORS into
 * q->max_hw_sectors.
 *
 * NOTE(review): this enforces a *floor* of BLK_DEF_MAX_SECTORS rather
 * than a ceiling, i.e. a driver-supplied value smaller than the default
 * is ignored — confirm this is the intended semantic.
 */
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
{
	q->max_hw_sectors = (max_sectors < BLK_DEF_MAX_SECTORS)
				? BLK_DEF_MAX_SECTORS
				: max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q:  the request queue for the device
@@ -395,11 +404,11 @@ int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size)
{
	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	--q->max_hw_segments;
	--q->max_phys_segments;
	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;
Loading