Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 165125e1 authored by Jens Axboe
Browse files

[BLOCK] Get rid of request_queue_t typedef



Some of the code has been gradually transitioned to using the proper
struct request_queue, but there's lots left. So do a full sweep of
the kernel and get rid of this typedef and replace its uses with
the proper type.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent f695baf2
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -79,9 +79,9 @@ and how to prepare flush requests. Note that the term 'ordered' is
used to indicate the whole sequence of performing barrier requests
including draining and flushing.

typedef void (prepare_flush_fn)(request_queue_t *q, struct request *rq);
typedef void (prepare_flush_fn)(struct request_queue *q, struct request *rq);

int blk_queue_ordered(request_queue_t *q, unsigned ordered,
int blk_queue_ordered(struct request_queue *q, unsigned ordered,
		      prepare_flush_fn *prepare_flush_fn);

@q			: the queue in question
@@ -92,7 +92,7 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered,
For example, SCSI disk driver's prepare_flush_fn looks like the
following.

static void sd_prepare_flush(request_queue_t *q, struct request *rq)
static void sd_prepare_flush(struct request_queue *q, struct request *rq)
{
	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+5 −5
Original line number Diff line number Diff line
@@ -740,12 +740,12 @@ Block now offers some simple generic functionality to help support command
queueing (typically known as tagged command queueing), ie manage more than
one outstanding command on a queue at any given time.

	blk_queue_init_tags(request_queue_t *q, int depth)
	blk_queue_init_tags(struct request_queue *q, int depth)

	Initialize internal command tagging structures for a maximum
	depth of 'depth'.

	blk_queue_free_tags(request_queue_t *q)
	blk_queue_free_tags(struct request_queue *q)

	Teardown tag info associated with the queue. This will be done
	automatically by block if blk_queue_cleanup() is called on a queue
@@ -754,7 +754,7 @@ one outstanding command on a queue at any given time.
The above are initialization and exit management, the main helpers during
normal operations are:

	blk_queue_start_tag(request_queue_t *q, struct request *rq)
	blk_queue_start_tag(struct request_queue *q, struct request *rq)

	Start tagged operation for this request. A free tag number between
	0 and 'depth' is assigned to the request (rq->tag holds this number),
@@ -762,7 +762,7 @@ normal operations are:
	for this queue is already achieved (or if the tag wasn't started for
	some other reason), 1 is returned. Otherwise 0 is returned.

	blk_queue_end_tag(request_queue_t *q, struct request *rq)
	blk_queue_end_tag(struct request_queue *q, struct request *rq)

	End tagged operation on this request. 'rq' is removed from the internal
	book keeping structures.
@@ -781,7 +781,7 @@ queue. For instance, on IDE any tagged request error needs to clear both
the hardware and software block queue and enable the driver to sanely restart
all the outstanding requests. There's a third helper to do that:

	blk_queue_invalidate_tags(request_queue_t *q)
	blk_queue_invalidate_tags(struct request_queue *q)

	Clear the internal block tag queue and re-add all the pending requests
	to the request queue. The driver will receive them again on the
+1 −1
Original line number Diff line number Diff line
@@ -83,6 +83,6 @@ struct bio *bio DBI First bio in request

struct bio *biotail		DBI	Last bio in request

request_queue_t *q		DB	Request queue this request belongs to
struct request_queue *q		DB	Request queue this request belongs to

struct request_list *rl		B	Request list this request came from
+1 −1
Original line number Diff line number Diff line
@@ -79,7 +79,7 @@ Field 8 -- # of milliseconds spent writing
    measured from __make_request() to end_that_request_last()).
Field  9 -- # of I/Os currently in progress
    The only field that should go to zero. Incremented as requests are
    given to appropriate request_queue_t and decremented as they finish.
    given to appropriate struct request_queue and decremented as they finish.
Field 10 -- # of milliseconds spent doing I/Os
    This field increases so long as field 9 is nonzero.
Field 11 -- weighted # of milliseconds spent doing I/Os
+4 −4
Original line number Diff line number Diff line
@@ -161,11 +161,11 @@ static void mbox_rx_work(struct work_struct *work)
/*
 * Mailbox interrupt handler
 */
static void mbox_txq_fn(request_queue_t * q)
static void mbox_txq_fn(struct request_queue * q)
{
}

static void mbox_rxq_fn(request_queue_t * q)
static void mbox_rxq_fn(struct request_queue * q)
{
}

@@ -180,7 +180,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
{
	struct request *rq;
	mbox_msg_t msg;
	request_queue_t *q = mbox->rxq->queue;
	struct request_queue *q = mbox->rxq->queue;

	disable_mbox_irq(mbox, IRQ_RX);

@@ -297,7 +297,7 @@ static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
					request_fn_proc * proc,
					void (*work) (struct work_struct *))
{
	request_queue_t *q;
	struct request_queue *q;
	struct omap_mbox_queue *mq;

	mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL);
Loading