Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 529262d5 authored by Christoph Hellwig, committed by Jens Axboe
Browse files

block: remove ->poll_fn



This was intended to support users like nvme multipath, but is just
getting in the way and adding another indirect call.

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9d6610b7
Loading
Loading
Loading
Loading
+0 −23
Original line number Diff line number Diff line
@@ -1250,29 +1250,6 @@ blk_qc_t submit_bio(struct bio *bio)
}
EXPORT_SYMBOL(submit_bio);

/**
 * blk_poll - poll for IO completions
 * @q:  the queue
 * @cookie: cookie passed back at IO submission time
 * @spin: whether to spin for completions
 *
 * Description:
 *    Poll for completions on the passed in queue. Returns number of
 *    completed entries found. If @spin is true, then blk_poll will continue
 *    looping until at least one completion is found, unless the task is
 *    otherwise marked running (or we need to reschedule).
 */
int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
{
	/* Nothing to do if the queue has no poll handler or the cookie is bogus */
	if (!q->poll_fn || !blk_qc_t_valid(cookie))
		return 0;

	/*
	 * Flush the current task's plug list first, so any IO still held
	 * there is actually submitted before we start polling for it.
	 */
	if (current->plug)
		blk_flush_plug_list(current->plug, false);
	/* Indirect call; this commit removes ->poll_fn in favor of calling blk-mq directly */
	return q->poll_fn(q, cookie, spin);
}
EXPORT_SYMBOL_GPL(blk_poll);

/**
 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 *                              for new the queue limits
+19 −5
Original line number Diff line number Diff line
@@ -38,7 +38,6 @@
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

@@ -2838,8 +2837,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
	spin_lock_init(&q->requeue_lock);

	blk_queue_make_request(q, blk_mq_make_request);
	if (q->mq_ops->poll)
		q->poll_fn = blk_mq_poll;

	/*
	 * Do this after blk_queue_make_request() overrides it...
@@ -3400,14 +3397,30 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
	return blk_mq_poll_hybrid_sleep(q, hctx, rq);
}

static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
/**
 * blk_poll - poll for IO completions
 * @q:  the queue
 * @cookie: cookie passed back at IO submission time
 * @spin: whether to spin for completions
 *
 * Description:
 *    Poll for completions on the passed in queue. Returns number of
 *    completed entries found. If @spin is true, then blk_poll will continue
 *    looping until at least one completion is found, unless the task is
 *    otherwise marked running (or we need to reschedule).
 */
int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
{
	struct blk_mq_hw_ctx *hctx;
	long state;

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
	if (!blk_qc_t_valid(cookie) ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return 0;

	if (current->plug)
		blk_flush_plug_list(current->plug, false);

	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];

	/*
@@ -3448,6 +3461,7 @@ static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
	__set_current_state(TASK_RUNNING);
	return 0;
}
EXPORT_SYMBOL_GPL(blk_poll);

unsigned int blk_mq_rq_cpu(struct request *rq)
{
+0 −2
Original line number Diff line number Diff line
@@ -283,7 +283,6 @@ static inline unsigned short req_get_ioprio(struct request *req)
struct blk_queue_ctx;

typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (poll_q_fn) (struct request_queue *q, blk_qc_t, bool spin);

struct bio_vec;
typedef int (dma_drain_needed_fn)(struct request *);
@@ -401,7 +400,6 @@ struct request_queue {
	struct rq_qos		*rq_qos;

	make_request_fn		*make_request_fn;
	poll_q_fn		*poll_fn;
	dma_drain_needed_fn	*dma_drain_needed;

	const struct blk_mq_ops	*mq_ops;