
Commit 72d4cd9f authored by Mike Snitzer, committed by Jens Axboe

block: max hardware sectors limit wrapper



Implement blk_limits_max_hw_sectors() and make
blk_queue_max_hw_sectors() a wrapper around it.

DM needs this to avoid setting queue_limits' max_hw_sectors and
max_sectors directly.  dm_set_device_limits() now leverages the
blk_limits_max_hw_sectors() logic to establish the appropriate
max_hw_sectors minimum (PAGE_SIZE).  This fixes an issue where DM was
incorrectly setting max_sectors rather than max_hw_sectors, which
made dm_merge_bvec()'s max_hw_sectors check ineffective.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@kernel.org
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent e692cb66
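For orientation before the diffs (a hedged sketch, not part of the patch): the split creates two call sites. Here `q`, `limits`, and the 255-sector value are illustrative placeholders standing in for a driver's request queue and a stacked target's queue_limits.

/* A driver that owns a fully constructed request_queue keeps using the
 * wrapper, which now just forwards into the limits-based helper. */
blk_queue_max_hw_sectors(q, 255);

/* Stacking code such as DM composes a bare struct queue_limits before
 * any queue exists, so it applies the same clamping logic directly. */
blk_limits_max_hw_sectors(limits, (unsigned int) (PAGE_SIZE >> 9));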
block/blk-settings.c  +20 −6
@@ -229,8 +229,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q:  the request queue for the device
+ * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
+ * @limits: the queue limits
  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
  *
  * Description:
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
 {
 	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -252,10 +252,24 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 		       __func__, max_hw_sectors);
 	}
 
-	q->limits.max_hw_sectors = max_hw_sectors;
-	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
+	limits->max_hw_sectors = max_hw_sectors;
+	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
 				    BLK_DEF_MAX_SECTORS);
 }
+EXPORT_SYMBOL(blk_limits_max_hw_sectors);
+
+/**
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q:  the request queue for the device
+ * @max_hw_sectors:  max hardware sectors in the usual 512b unit
+ *
+ * Description:
+ *    See description for blk_limits_max_hw_sectors().
+ **/
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+{
+	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
+}
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
 /**
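As a quick sanity check of the clamping behavior above, here is a minimal user-space model of the helper's arithmetic. Assumptions are stated in the macros: 4 KiB pages (PAGE_CACHE_SHIFT of 12) and BLK_DEF_MAX_SECTORS of 1024, its value in kernels of this era.

#include <stdio.h>

#define PAGE_CACHE_SHIFT	12	/* assumed: 4 KiB pages */
#define PAGE_CACHE_SIZE		(1UL << PAGE_CACHE_SHIFT)
#define BLK_DEF_MAX_SECTORS	1024	/* assumed default soft cap, 512 B sectors */

struct queue_limits {
	unsigned int max_hw_sectors;	/* hard limit */
	unsigned int max_sectors;	/* soft limit, never above the hard one */
};

/* Mirrors the patched helper: clamp the hard limit up to one page,
 * then bound the soft limit by BLK_DEF_MAX_SECTORS. */
static void model_limits_max_hw_sectors(struct queue_limits *limits,
					unsigned int max_hw_sectors)
{
	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE)
		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);

	limits->max_hw_sectors = max_hw_sectors;
	limits->max_sectors = max_hw_sectors < BLK_DEF_MAX_SECTORS ?
			      max_hw_sectors : BLK_DEF_MAX_SECTORS;
}

int main(void)
{
	struct queue_limits lim;

	model_limits_max_hw_sectors(&lim, 4);		/* below one page: clamped up */
	printf("hw=%u soft=%u\n", lim.max_hw_sectors, lim.max_sectors);	/* hw=8 soft=8 */

	model_limits_max_hw_sectors(&lim, 65536);	/* large: soft limit capped */
	printf("hw=%u soft=%u\n", lim.max_hw_sectors, lim.max_sectors);	/* hw=65536 soft=1024 */

	return 0;
}

The ternary stands in for the kernel's min_t(unsigned int, ...); the bounded assignment is the same.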
drivers/md/dm-table.c  +2 −3
@@ -517,8 +517,7 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 	 */
 
 	if (q->merge_bvec_fn && !ti->type->merge)
-		limits->max_sectors =
-			min_not_zero(limits->max_sectors,
+		blk_limits_max_hw_sectors(limits,
 					  (unsigned int) (PAGE_SIZE >> 9));
 	return 0;
 }
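The practical effect of this hunk, sketched as pseudo-assignments (not compilable as-is; values assume 4 KiB pages, so PAGE_SIZE >> 9 is 8 sectors):

/* Before: only the soft limit moved; max_hw_sectors stayed untouched,
 * so dm_merge_bvec()'s check against the hardware limit never fired. */
limits->max_sectors = min_not_zero(limits->max_sectors, 8U);

/* After: both limits go through the shared helper, which sets
 * max_hw_sectors = 8 and max_sectors = 8, making the merge-time
 * hardware-limit check meaningful. */
blk_limits_max_hw_sectors(limits, 8U);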
include/linux/blkdev.h  +1 −0
@@ -808,6 +808,7 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);