Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c917dfe5 authored by Keith Busch, committed by Jens Axboe
Browse files

NVMe: Start all requests



Once the nvme callback is set for a request, the driver can start it
and make it available for timeout handling. For timed out commands on a
device that is not initialized, this fixes potential deadlocks that can
occur on startup and shutdown when a device is unresponsive since they
can now be cancelled.

Asynchronous requests do not have any expected timeout, so these use the
new "REQ_NO_TIMEOUT" request flag.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent eb130dbf
Loading
Loading
Loading
Loading
+12 −4
Original line number Diff line number Diff line
@@ -215,6 +215,7 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
	cmd->fn = handler;
	cmd->ctx = ctx;
	cmd->aborted = 0;
	blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
}

/* Special values must be less than 0x1000 */
@@ -664,8 +665,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
		}
	}

	blk_mq_start_request(req);

	nvme_set_info(cmd, iod, req_completion);
	spin_lock_irq(&nvmeq->q_lock);
	if (req->cmd_flags & REQ_DISCARD)
@@ -835,6 +834,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->cmd_flags |= REQ_NO_TIMEOUT;
	cmd_info = blk_mq_rq_to_pdu(req);
	nvme_set_info(cmd_info, req, async_req_completion);

@@ -1086,7 +1086,15 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)

	dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
							nvmeq->qid);
	if (nvmeq->dev->initialized)

	if (!nvmeq->dev->initialized) {
		/*
		 * Force cancelled command frees the request, which requires we
		 * return BLK_EH_NOT_HANDLED.
		 */
		nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
		return BLK_EH_NOT_HANDLED;
	}
	nvme_abort_req(req);

	/*