Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 086fa5ff authored by Martin K. Petersen, committed by Jens Axboe
Browse files

block: Rename blk_queue_max_sectors to blk_queue_max_hw_sectors



The block layer calling convention is blk_queue_<limit name>.
blk_queue_max_sectors predates this practice, leading to some confusion.
Rename the function to appropriately reflect that its intended use is to
set max_hw_sectors.

Also introduce a temporary wrapper for backwards compatibility.  This can
be removed after the merge window is closed.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent eb28d31b
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -747,7 +747,7 @@ static int ubd_open_dev(struct ubd *ubd_dev)
	ubd_dev->fd = fd;

	if(ubd_dev->cow.file != NULL){
		blk_queue_max_sectors(ubd_dev->queue, 8 * sizeof(long));
		blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long));

		err = -ENOMEM;
		ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len);
+4 −4
Original line number Diff line number Diff line
@@ -154,7 +154,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
	q->unplug_timer.data = (unsigned long)q;

	blk_set_default_limits(&q->limits);
	blk_queue_max_sectors(q, BLK_SAFE_MAX_SECTORS);
	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);

	/*
	 * If the caller didn't supply a lock, fall back to our embedded
@@ -210,7 +210,7 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
@@ -225,7 +225,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_hw_sectors)
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -237,7 +237,7 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_hw_sectors)
	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
				      BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_queue_max_sectors);
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
+1 −1
Original line number Diff line number Diff line
@@ -1097,7 +1097,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
		dev->flags |= ATA_DFLAG_NO_UNLOAD;

	/* configure max sectors */
	blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
	blk_queue_max_hw_sectors(sdev->request_queue, dev->max_sectors);

	if (dev->class == ATA_DEV_ATAPI) {
		struct request_queue *q = sdev->request_queue;
+1 −1
Original line number Diff line number Diff line
@@ -2535,7 +2535,7 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
  	RequestQueue->queuedata = Controller;
  	blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
	blk_queue_max_phys_segments(RequestQueue, Controller->DriverScatterGatherLimit);
	blk_queue_max_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
	blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
	disk->queue = RequestQueue;
	sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
	disk->major = MajorNumber;
+1 −1
Original line number Diff line number Diff line
@@ -434,7 +434,7 @@ static struct brd_device *brd_alloc(int i)
		goto out_free_dev;
	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
	blk_queue_max_sectors(brd->brd_queue, 1024);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);

	disk = brd->brd_disk = alloc_disk(1 << part_shift);
Loading