
Commit db8c48e4 authored by Christoph Hellwig, committed by Jens Axboe

nvme: return BLK_EH_DONE from ->timeout



NVMe always completes the request before returning from ->timeout, either
by polling for it, or by disabling the controller.  Return BLK_EH_DONE so
that the block layer doesn't even try to complete it again.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6600593c
drivers/nvme/host/pci.c  +5 −9
@@ -1205,7 +1205,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 		nvme_warn_reset(dev, csts);
 		nvme_dev_disable(dev, false);
 		nvme_reset_ctrl(&dev->ctrl);
-		return BLK_EH_HANDLED;
+		return BLK_EH_DONE;
 	}
 
 	/*
@@ -1215,14 +1215,14 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 		dev_warn(dev->ctrl.device,
 			 "I/O %d QID %d timeout, completion polled\n",
 			 req->tag, nvmeq->qid);
-		return BLK_EH_HANDLED;
+		return BLK_EH_DONE;
 	}
 
 	/*
 	 * Shutdown immediately if controller times out while starting. The
 	 * reset work will see the pci device disabled when it gets the forced
 	 * cancellation error. All outstanding requests are completed on
-	 * shutdown, so we return BLK_EH_HANDLED.
+	 * shutdown, so we return BLK_EH_DONE.
 	 */
 	switch (dev->ctrl.state) {
 	case NVME_CTRL_CONNECTING:
@@ -1232,7 +1232,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 			 req->tag, nvmeq->qid);
 		nvme_dev_disable(dev, false);
 		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
-		return BLK_EH_HANDLED;
+		return BLK_EH_DONE;
 	default:
 		break;
 	}
@@ -1249,12 +1249,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 		nvme_dev_disable(dev, false);
 		nvme_reset_ctrl(&dev->ctrl);
 
-		/*
-		 * Mark the request as handled, since the inline shutdown
-		 * forces all outstanding requests to complete.
-		 */
 		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
-		return BLK_EH_HANDLED;
+		return BLK_EH_DONE;
 	}
 
 	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
drivers/nvme/host/rdma.c  +1 −1
@@ -1598,7 +1598,7 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
 	/* fail with DNR on cmd timeout */
 	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
 
-	return BLK_EH_HANDLED;
+	return BLK_EH_DONE;
 }
 
 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
drivers/nvme/target/loop.c  +1 −1
@@ -146,7 +146,7 @@ nvme_loop_timeout(struct request *rq, bool reserved)
 	/* fail with DNR on admin cmd timeout */
 	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
 
-	return BLK_EH_HANDLED;
+	return BLK_EH_DONE;
 }
 
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,