
Commit 75eb6c37 authored by Tejun Heo, committed by Jens Axboe

block: pass around REQ_* flags instead of broken down booleans during request alloc/free



blk_alloc_request() and freed_request() take different combinations of
REQ_* @flags, @priv and @is_sync, even though @flags is a superset of
the latter two.  Make them take @flags only.  This cleans up the code a
bit and will ease updating allocation-related REQ_* flags.

This patch doesn't introduce any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent bc9fcbf9
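
The whole cleanup rests on one observation: the flags word already answers both questions the extra parameters were answering, namely whether the request is sync (rw_is_sync()) and whether it carries elevator-private data (REQ_ELVPRIV). The standalone C sketch below illustrates the calling-convention change outside the kernel. It is illustrative only: REQ_SYNC and REQ_ELVPRIV are stand-in bit definitions rather than the real block-layer values, freed_request_old()/freed_request_new() are hypothetical names, and the rw_is_sync() test is simplified to a plain mask (the real helper also counts reads as sync).

#include <stdio.h>

/* Stand-in flag bits; the real definitions live in the block layer headers. */
#define REQ_SYNC	(1U << 0)
#define REQ_ELVPRIV	(1U << 1)

/* Old convention: every caller broke the flags word down into booleans. */
static void freed_request_old(unsigned int sync, unsigned int priv)
{
	printf("old: sync=%u priv=%u\n", sync, priv);
}

/* New convention: derive both answers from @flags at the point of use. */
static void freed_request_new(unsigned int flags)
{
	unsigned int sync = !!(flags & REQ_SYNC);	/* simplified rw_is_sync() */
	unsigned int priv = !!(flags & REQ_ELVPRIV);

	printf("new: sync=%u priv=%u\n", sync, priv);
}

int main(void)
{
	unsigned int flags = REQ_SYNC | REQ_ELVPRIV;

	/* Both calls carry the same information ... */
	freed_request_old(!!(flags & REQ_SYNC), !!(flags & REQ_ELVPRIV));
	/* ... but the new form cannot drift out of step with the flags word. */
	freed_request_new(flags);
	return 0;
}

The same reasoning drives every hunk below: get_request() sets REQ_ELVPRIV in rw_flags instead of tracking a local priv variable, and the free paths recover the sync and elvpriv answers from the flags they are handed.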
block/blk-core.c: +17 −19
@@ -574,7 +574,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 }
 
 static struct request *
-blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
+blk_alloc_request(struct request_queue *q, unsigned int flags, gfp_t gfp_mask)
 {
 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 
@@ -585,13 +585,11 @@ blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
 
 	rq->cmd_flags = flags | REQ_ALLOCED;
 
-	if (priv) {
-		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
-			mempool_free(rq, q->rq.rq_pool);
-			return NULL;
-		}
-		rq->cmd_flags |= REQ_ELVPRIV;
+	if ((flags & REQ_ELVPRIV) &&
+	    unlikely(elv_set_request(q, rq, gfp_mask))) {
+		mempool_free(rq, q->rq.rq_pool);
+		return NULL;
 	}
 
 	return rq;
 }
@@ -649,12 +647,13 @@ static void __freed_request(struct request_queue *q, int sync)
  * A request has just been released.  Account for it, update the full and
  * congestion status, wake up any waiters.   Called under q->queue_lock.
  */
-static void freed_request(struct request_queue *q, int sync, int priv)
+static void freed_request(struct request_queue *q, unsigned int flags)
 {
 	struct request_list *rl = &q->rq;
+	int sync = rw_is_sync(flags);
 
 	rl->count[sync]--;
-	if (priv)
+	if (flags & REQ_ELVPRIV)
 		rl->elvpriv--;
 
 	__freed_request(q, sync);
@@ -694,7 +693,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
-	int may_queue, priv = 0;
+	int may_queue;
 
 	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
@@ -738,9 +737,9 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	rl->count[is_sync]++;
 	rl->starved[is_sync] = 0;
 
-	if (blk_rq_should_init_elevator(bio)) {
-		priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-		if (priv)
-			rl->elvpriv++;
+	if (blk_rq_should_init_elevator(bio) &&
+	    !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+		rw_flags |= REQ_ELVPRIV;
+		rl->elvpriv++;
 	}
 
@@ -748,7 +747,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 		rw_flags |= REQ_IO_STAT;
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
+	rq = blk_alloc_request(q, rw_flags, gfp_mask);
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -758,7 +757,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 		 * wait queue, but this is pretty rare.
 		 */
 		spin_lock_irq(q->queue_lock);
-		freed_request(q, is_sync, priv);
+		freed_request(q, rw_flags);
 
 		/*
 		 * in the very unlikely event that allocation failed and no
@@ -1050,14 +1049,13 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 	 * it didn't come out of our reserved rq pools
 	 */
 	if (req->cmd_flags & REQ_ALLOCED) {
-		int is_sync = rq_is_sync(req) != 0;
-		int priv = req->cmd_flags & REQ_ELVPRIV;
+		unsigned int flags = req->cmd_flags;
 
 		BUG_ON(!list_empty(&req->queuelist));
 		BUG_ON(!hlist_unhashed(&req->hash));
 
 		blk_free_request(q, req);
-		freed_request(q, is_sync, priv);
+		freed_request(q, flags);
 	}
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);