
Commit e7a2a87d authored by Christoph Hellwig, committed by Jens Axboe

nvme: switch abort to blk_execute_rq_nowait



And remove the now unused nvme_submit_cmd helper.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent d8f32166
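
For context, here is the submission pattern this commit adopts, sketched as a standalone pair of functions. It only uses APIs that appear in the diff below (nvme_alloc_request, blk_execute_rq_nowait, ADMIN_TIMEOUT, BLK_MQ_REQ_NOWAIT); the names example_submit and example_endio are illustrative, not part of the commit:

static void example_endio(struct request *req, int error)
{
	/*
	 * Called from completion context once the controller answers.
	 * For NVMe passthrough requests of this era, the command result
	 * is stashed in req->special and the status in req->errors.
	 */
	blk_mq_free_request(req);
}

static int example_submit(struct nvme_dev *dev, struct nvme_command *cmd)
{
	struct request *rq;

	/*
	 * Wrap the NVMe command in a block-layer request.  NOWAIT keeps
	 * this usable from contexts that cannot sleep waiting for a tag,
	 * such as the request timeout handler.
	 */
	rq = nvme_alloc_request(dev->ctrl.admin_q, cmd, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ADMIN_TIMEOUT;
	rq->end_io_data = NULL;

	/* Queue the request and return immediately; the end_io callback
	 * runs when the command completes. */
	blk_execute_rq_nowait(rq->q, NULL, rq, 0, example_endio);
	return 0;
}

Compared with the removed path (nvme_set_info plus a driver-private abort_completion callback and the hand-rolled nvme_submit_cmd doorbell write), blk-mq now owns tag allocation, timeout handling, and completion routing, which is why the cmd.abort.command_id assignment and the whole nvme_submit_cmd helper can go away.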
drivers/nvme/host/pci.c +26 −35
@@ -375,20 +375,6 @@ static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
 	}
 }
 
-static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
-						struct nvme_completion *cqe)
-{
-	struct request *req = ctx;
-
-	u16 status = le16_to_cpup(&cqe->status) >> 1;
-	u32 result = le32_to_cpup(&cqe->result);
-
-	blk_mq_free_request(req);
-
-	dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
-	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
-}
-
 static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq,
 				  unsigned int tag)
 {
@@ -440,14 +426,6 @@ static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
 	nvmeq->sq_tail = tail;
 }
 
-static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&nvmeq->q_lock, flags);
-	__nvme_submit_cmd(nvmeq, cmd);
-	spin_unlock_irqrestore(&nvmeq->q_lock, flags);
-}
-
 static __le64 **iod_list(struct nvme_iod *iod)
 {
 	return ((void *)iod) + iod->offset;
@@ -1045,13 +1023,25 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
 	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }
 
+static void abort_endio(struct request *req, int error)
+{
+	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
+	struct nvme_queue *nvmeq = cmd->nvmeq;
+	u32 result = (u32)(uintptr_t)req->special;
+	u16 status = req->errors;
+
+	dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
+	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
+
+	blk_mq_free_request(req);
+}
+
 static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 {
 	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = cmd_rq->nvmeq;
 	struct nvme_dev *dev = nvmeq->dev;
 	struct request *abort_req;
-	struct nvme_cmd_info *abort_cmd;
 	struct nvme_command cmd;
 
 	/*
@@ -1089,30 +1079,31 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 		return BLK_EH_HANDLED;
 	}
 
-	if (atomic_dec_and_test(&dev->ctrl.abort_limit))
-		return BLK_EH_RESET_TIMER;
+	cmd_rq->aborted = 1;
 
-	abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE,
-			BLK_MQ_REQ_NOWAIT);
-	if (IS_ERR(abort_req)) {
+	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
 		atomic_inc(&dev->ctrl.abort_limit);
 		return BLK_EH_RESET_TIMER;
 	}
 
-	abort_cmd = blk_mq_rq_to_pdu(abort_req);
-	nvme_set_info(abort_cmd, abort_req, abort_completion);
-
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.abort.opcode = nvme_admin_abort_cmd;
 	cmd.abort.cid = req->tag;
 	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
-	cmd.abort.command_id = abort_req->tag;
-
-	cmd_rq->aborted = 1;
 
 	dev_warn(nvmeq->q_dmadev, "I/O %d QID %d timeout, aborting\n",
 				 req->tag, nvmeq->qid);
-	nvme_submit_cmd(dev->queues[0], &cmd);
+
+	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
+			BLK_MQ_REQ_NOWAIT);
+	if (IS_ERR(abort_req)) {
+		atomic_inc(&dev->ctrl.abort_limit);
+		return BLK_EH_RESET_TIMER;
+	}
+
+	abort_req->timeout = ADMIN_TIMEOUT;
+	abort_req->end_io_data = NULL;
+	blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
 
 	/*
 	 * The aborted req will be completed on receiving the abort req.