Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ee714f2d authored by Martin K. Petersen, committed by Jens Axboe
Browse files

block: Finalize conversion of block limits functions



Remove compatibility wrappers and update remaining drivers.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 2cda2728
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -2533,7 +2533,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
  	Controller->RequestQueue[n] = RequestQueue;
  	blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
  	RequestQueue->queuedata = Controller;
  	blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
	blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
	blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
	disk->queue = RequestQueue;
+2 −3
Original line number Diff line number Diff line
@@ -347,14 +347,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
	set_capacity(vblk->disk, cap);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_phys_segments(q, vblk->sg_elems-2);
	blk_queue_max_hw_segments(q, vblk->sg_elems-2);
	blk_queue_max_segments(q, vblk->sg_elems-2);

	/* No need to bounce any requests */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	/* No real sector limit. */
	blk_queue_max_sectors(q, -1U);
	blk_queue_max_hw_sectors(q, -1U);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
+0 −24
Original line number Diff line number Diff line
@@ -921,26 +921,7 @@ extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);

/* Temporary compatibility wrapper */
static inline void blk_queue_max_sectors(struct request_queue *q, unsigned int max)
{
	blk_queue_max_hw_sectors(q, max);
}

extern void blk_queue_max_segments(struct request_queue *, unsigned short);

static inline void blk_queue_max_phys_segments(struct request_queue *q, unsigned short max)
{
	blk_queue_max_segments(q, max);
}

static inline void blk_queue_max_hw_segments(struct request_queue *q, unsigned short max)
{
	blk_queue_max_segments(q, max);
}


extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
@@ -1030,11 +1011,6 @@ static inline int sb_issue_discard(struct super_block *sb,

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
#define MAX_SEGMENT_SIZE	65536

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,