Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 31c7c7d2 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Jens Axboe
Browse files

nvme: merge nvme_abort_req and nvme_timeout



We want to be able to return better error values from nvme_timeout, which
is significantly easier if the two functions are merged.  Also clean up and
reduce the printk spew so that we only get one message per abort.

Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
Signed-off-by: default avatarKeith Busch <keith.busch@intel.com>
Signed-off-by: default avatarJens Axboe <axboe@fb.com>
parent 4c9f748f
Loading
Loading
Loading
Loading
+18 −29
Original line number Diff line number Diff line
@@ -1078,13 +1078,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

/**
 * nvme_abort_req - Attempt aborting a request
 *
 * Schedule controller reset if the command was already aborted once before and
 * still hasn't been returned to the driver, or if this is the admin queue.
 */
static void nvme_abort_req(struct request *req)
static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
{
	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = cmd_rq->nvmeq;
@@ -1093,6 +1087,11 @@ static void nvme_abort_req(struct request *req)
	struct nvme_cmd_info *abort_cmd;
	struct nvme_command cmd;

	/*
	 * Schedule controller reset if the command was already aborted once
	 * before and still hasn't been returned to the driver, or if this is
	 * the admin queue.
	 */
	if (!nvmeq->qid || cmd_rq->aborted) {
		spin_lock_irq(&dev_list_lock);
		if (!__nvme_reset(dev)) {
@@ -1101,16 +1100,16 @@ static void nvme_abort_req(struct request *req)
				 req->tag, nvmeq->qid);
		}
		spin_unlock_irq(&dev_list_lock);
		return;
		return BLK_EH_RESET_TIMER;
	}

	if (!dev->ctrl.abort_limit)
		return;
		return BLK_EH_RESET_TIMER;

	abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE,
			BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(abort_req))
		return;
		return BLK_EH_RESET_TIMER;

	abort_cmd = blk_mq_rq_to_pdu(abort_req);
	nvme_set_info(abort_cmd, abort_req, abort_completion);
@@ -1124,9 +1123,16 @@ static void nvme_abort_req(struct request *req)
	--dev->ctrl.abort_limit;
	cmd_rq->aborted = 1;

	dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag,
							nvmeq->qid);
	dev_warn(nvmeq->q_dmadev, "I/O %d QID %d timeout, aborting\n",
				 req->tag, nvmeq->qid);
	nvme_submit_cmd(dev->queues[0], &cmd);

	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again. If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;
}

static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
@@ -1157,23 +1163,6 @@ static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved
	fn(nvmeq, ctx, &cqe);
}

static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
{
	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = cmd->nvmeq;

	dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
							nvmeq->qid);
	nvme_abort_req(req);

	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again. If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),