
Commit c7bb9ad1 authored by Jens Axboe

block: get rid of q->softirq_done_fn()



With the legacy path gone, all we do is funnel it through the
mq_ops->complete() operation.

Tested-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7d692330
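
For drivers nothing changes here: the completion callback was always supplied through blk_mq_ops, and blk_queue_softirq_done() merely copied that pointer into the queue. A minimal sketch of the driver side, using hypothetical mydrv_* names that are not part of this commit:

    #include <linux/blk-mq.h>

    /* Runs on whichever CPU the block layer picked, possibly from
     * softirq or IPI context.
     */
    static void mydrv_complete_rq(struct request *rq)
    {
    	blk_mq_end_request(rq, BLK_STS_OK);
    }

    static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
    				       const struct blk_mq_queue_data *bd)
    {
    	blk_mq_start_request(bd->rq);
    	/* Submit to hardware here; the IRQ handler later calls
    	 * blk_mq_complete_request(), which ends up in .complete below.
    	 */
    	return BLK_STS_OK;
    }

    static const struct blk_mq_ops mydrv_mq_ops = {
    	.queue_rq	= mydrv_queue_rq,
    	.complete	= mydrv_complete_rq,
    };
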
block/blk-mq.c +8 −9

@@ -546,13 +546,15 @@ EXPORT_SYMBOL(blk_mq_end_request);
 static void __blk_mq_complete_request_remote(void *data)
 {
 	struct request *rq = data;
+	struct request_queue *q = rq->q;
 
-	rq->q->softirq_done_fn(rq);
+	q->mq_ops->complete(rq);
 }
 
 static void __blk_mq_complete_request(struct request *rq)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
+	struct request_queue *q = rq->q;
 	bool shared = false;
 	int cpu;
 
@@ -568,18 +570,18 @@ static void __blk_mq_complete_request(struct request *rq)
 	 * So complete IO reqeust in softirq context in case of single queue
 	 * for not degrading IO performance by irqsoff latency.
 	 */
-	if (rq->q->nr_hw_queues == 1) {
+	if (q->nr_hw_queues == 1) {
 		__blk_complete_request(rq);
 		return;
 	}
 
-	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
-		rq->q->softirq_done_fn(rq);
+	if (!test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) {
+		q->mq_ops->complete(rq);
 		return;
 	}
 
 	cpu = get_cpu();
-	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
+	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
 		shared = cpus_share_cache(cpu, ctx->cpu);
 
 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
@@ -588,7 +590,7 @@ static void __blk_mq_complete_request(struct request *rq)
 		rq->csd.flags = 0;
 		smp_call_function_single_async(ctx->cpu, &rq->csd);
 	} else {
-		rq->q->softirq_done_fn(rq);
+		q->mq_ops->complete(rq);
 	}
 	put_cpu();
 }
@@ -2701,9 +2703,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	 */
 	q->poll_nsec = -1;
 
-	if (set->ops->complete)
-		blk_queue_softirq_done(q, set->ops->complete);
-
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 	blk_mq_add_queue_tag_set(set, q);
 	blk_mq_map_swqueue(q);

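The hunks above leave the completion routing in __blk_mq_complete_request() structurally unchanged; only the final indirection differs. Condensed into one view, reconstructed from the hunks plus their surrounding context (a sketch, not the literal kernel code):

    /* Condensed flow of __blk_mq_complete_request() after this patch. */
    static void complete_routing_sketch(struct request *rq)
    {
    	struct blk_mq_ctx *ctx = rq->mq_ctx;
    	struct request_queue *q = rq->q;
    	bool shared = false;
    	int cpu;

    	if (q->nr_hw_queues == 1) {
    		/* Single hw queue: defer to the BLOCK_SOFTIRQ machinery. */
    		__blk_complete_request(rq);
    		return;
    	}
    	if (!test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) {
    		/* No completion-CPU affinity requested: complete right here. */
    		q->mq_ops->complete(rq);
    		return;
    	}
    	cpu = get_cpu();
    	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
    		shared = cpus_share_cache(cpu, ctx->cpu);
    	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
    		/* IPI the submitting CPU; the remote handler invokes
    		 * q->mq_ops->complete() there.
    		 */
    		rq->csd.func = __blk_mq_complete_request_remote;
    		rq->csd.info = rq;
    		rq->csd.flags = 0;
    		smp_call_function_single_async(ctx->cpu, &rq->csd);
    	} else {
    		/* The submitting CPU is close enough: complete locally. */
    		q->mq_ops->complete(rq);
    	}
    	put_cpu();
    }
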
block/blk-settings.c +0 −6

@@ -20,12 +20,6 @@ EXPORT_SYMBOL(blk_max_low_pfn);
 
 unsigned long blk_max_pfn;
 
-void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
-{
-	q->softirq_done_fn = fn;
-}
-EXPORT_SYMBOL(blk_queue_softirq_done);
-
 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
 {
 	q->rq_timeout = timeout;

block/blk-softirq.c +2 −2

@@ -34,7 +34,7 @@ static __latent_entropy void blk_done_softirq(struct softirq_action *h)
 
 		rq = list_entry(local_list.next, struct request, ipi_list);
 		list_del_init(&rq->ipi_list);
-		rq->q->softirq_done_fn(rq);
+		rq->q->mq_ops->complete(rq);
 	}
 }
 
@@ -102,7 +102,7 @@ void __blk_complete_request(struct request *req)
 	unsigned long flags;
 	bool shared = false;
 
-	BUG_ON(!q->softirq_done_fn);
+	BUG_ON(!q->mq_ops->complete);
 
 	local_irq_save(flags);
 	cpu = smp_processor_id();

include/linux/blk-mq.h +2 −1

@@ -115,6 +115,7 @@ typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
 typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
 typedef bool (busy_fn)(struct request_queue *);
+typedef void (complete_fn)(struct request *);
 
 
 struct blk_mq_ops {
@@ -142,7 +143,7 @@ struct blk_mq_ops {
 	 */
 	poll_fn			*poll;
 
-	softirq_done_fn		*complete;
+	complete_fn		*complete;
 
 	/*
 	 * Called when the block layer side of a hardware queue has been
include/linux/blkdev.h +0 −3

@@ -290,7 +290,6 @@ typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
 
 struct bio_vec;
-typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
 
 enum blk_eh_timer_return {
@@ -407,7 +406,6 @@ struct request_queue {
 
 	make_request_fn		*make_request_fn;
 	poll_q_fn		*poll_fn;
-	softirq_done_fn		*softirq_done_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
 
 	const struct blk_mq_ops	*mq_ops;
@@ -1113,7 +1111,6 @@ extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
-extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);