Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 08e0029a authored by Christoph Hellwig, committed by Jens Axboe
Browse files

blk-mq: remove the error argument to blk_mq_complete_request



Now that all drivers that call blk_mq_complete_request have a
->complete callback we can remove the direct call to blk_mq_end_request,
as well as the error argument to blk_mq_complete_request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Bart Van Assche <Bart.VanAssche@sandisk.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 2609587c
Loading
Loading
Loading
Loading
+3 −12
Original line number Diff line number Diff line
@@ -442,16 +442,9 @@ static void blk_mq_stat_add(struct request *rq)

static void __blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (rq->internal_tag != -1)
		blk_mq_sched_completed_request(rq);

	blk_mq_stat_add(rq);

	if (!q->softirq_done_fn)
		blk_mq_end_request(rq, rq->errors);
	else
	blk_mq_ipi_complete_request(rq);
}

@@ -463,17 +456,15 @@ static void __blk_mq_complete_request(struct request *rq)
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through a IPI handler.
 **/
void blk_mq_complete_request(struct request *rq, int error)
void blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_should_fake_timeout(q)))
		return;
	if (!blk_mark_rq_complete(rq)) {
		rq->errors = error;
	if (!blk_mark_rq_complete(rq))
		__blk_mq_complete_request(rq);
}
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
+2 −2
Original line number Diff line number Diff line
@@ -465,7 +465,7 @@ static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
	struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);

	cmd->ret = ret;
	blk_mq_complete_request(cmd->rq, 0);
	blk_mq_complete_request(cmd->rq);
}

static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
@@ -1685,7 +1685,7 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
	/* complete non-aio request */
	if (!cmd->use_aio || ret) {
		cmd->ret = ret ? -EIO : 0;
		blk_mq_complete_request(cmd->rq, 0);
		blk_mq_complete_request(cmd->rq);
	}
}

+2 −2
Original line number Diff line number Diff line
@@ -242,7 +242,7 @@ static void mtip_async_complete(struct mtip_port *port,
	rq = mtip_rq_from_tag(dd, tag);

	cmd->status = status;
	blk_mq_complete_request(rq, 0);
	blk_mq_complete_request(rq);
}

/*
@@ -4109,7 +4109,7 @@ static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)

	if (likely(!reserv)) {
		cmd->status = -ENODEV;
		blk_mq_complete_request(rq, 0);
		blk_mq_complete_request(rq);
	} else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {

		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
+2 −2
Original line number Diff line number Diff line
@@ -635,7 +635,7 @@ static void recv_work(struct work_struct *work)
			break;
		}

		blk_mq_complete_request(blk_mq_rq_from_pdu(cmd), 0);
		blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
	}
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
@@ -651,7 +651,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved)
		return;
	cmd = blk_mq_rq_to_pdu(req);
	cmd->status = -EIO;
	blk_mq_complete_request(req, 0);
	blk_mq_complete_request(req);
}

static void nbd_clear_que(struct nbd_device *nbd)
+1 −1
Original line number Diff line number Diff line
@@ -281,7 +281,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode)  {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq, 0);
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
Loading