
Commit d4f6c3ab authored by Christoph Hellwig, committed by Jens Axboe

nvme: factor out a nvme_unmap_data helper



This is the counterpart to nvme_map_data.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent ba1ca37e
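
For readers coming to this from outside the driver: the hunks below replace the shared release_iod label in req_completion (previously reached by goto, with a requeue flag guarding the final completion) with a helper that every exit path calls directly. The following is a minimal standalone sketch of that cleanup-helper pattern in plain userspace C; the names (iod, unmap_data, complete_request) are illustrative stand-ins, not the driver's code:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the driver's per-request descriptor (nvme_iod). */
struct iod {
	void *sg;	/* pretend scatterlist allocation */
	int nents;	/* number of mapped entries, 0 if nothing mapped */
};

/* Counterpart to a hypothetical map_data(): the one place that
 * undoes the mapping, mirroring the role of nvme_unmap_data. */
static void unmap_data(struct iod *iod)
{
	if (iod->nents) {
		printf("unmapping %d entries\n", iod->nents);
		iod->nents = 0;
	}
	free(iod->sg);
	iod->sg = NULL;
}

/* Completion handler in the shape of the patched req_completion:
 * both exits release the mapping through the same helper, so no
 * cleanup label or requeue flag is needed. */
static void complete_request(struct iod *iod, bool retryable_error)
{
	if (retryable_error) {
		unmap_data(iod);	/* requeue path: tear down, then resubmit */
		printf("requeued for retry\n");
		return;
	}
	unmap_data(iod);		/* normal path: same single teardown */
	printf("completed\n");
}

int main(void)
{
	struct iod a = { malloc(16), 2 };
	struct iod b = { malloc(16), 3 };

	complete_request(&a, true);
	complete_request(&b, false);
	return 0;
}

The real helper additionally unmaps the integrity (DIF) metadata scatterlist and frees the iod, as the body of nvme_unmap_data in the last hunk shows.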
drivers/nvme/host/pci.c  +25 −18
@@ -89,10 +89,12 @@ static struct class *nvme_class;
 
 struct nvme_dev;
 struct nvme_queue;
+struct nvme_iod;
 
 static int __nvme_reset(struct nvme_dev *dev);
 static int nvme_reset(struct nvme_dev *dev);
 static void nvme_process_cq(struct nvme_queue *nvmeq);
+static void nvme_unmap_data(struct nvme_dev *dev, struct nvme_iod *iod);
 static void nvme_dead_ctrl(struct nvme_dev *dev);
 
 struct async_cmd_info {
@@ -655,7 +657,6 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 	struct request *req = iod_get_private(iod);
 	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
 	u16 status = le16_to_cpup(&cqe->status) >> 1;
-	bool requeue = false;
 	int error = 0;
 
 	if (unlikely(status)) {
@@ -663,13 +664,14 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 		    && (jiffies - req->start_time) < req->timeout) {
 			unsigned long flags;
 
-			requeue = true;
+			nvme_unmap_data(nvmeq->dev, iod);
+
 			blk_mq_requeue_request(req);
 			spin_lock_irqsave(req->q->queue_lock, flags);
 			if (!blk_queue_stopped(req->q))
 				blk_mq_kick_requeue_list(req->q);
 			spin_unlock_irqrestore(req->q->queue_lock, flags);
-			goto release_iod;
+			return;
 		}
 
 		if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
@@ -692,20 +694,7 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 			"completing aborted command with status:%04x\n",
 			error);
 
-release_iod:
-	if (iod->nents) {
-		dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents,
-			rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		if (blk_integrity_rq(req)) {
-			if (!rq_data_dir(req))
-				nvme_dif_remap(req, nvme_dif_complete);
-			dma_unmap_sg(nvmeq->dev->dev, iod->meta_sg, 1,
-				rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		}
-	}
-	nvme_free_iod(nvmeq->dev, iod);
-
-	if (likely(!requeue))
-		blk_mq_complete_request(req, error);
+	nvme_unmap_data(nvmeq->dev, iod);
+	blk_mq_complete_request(req, error);
 }

@@ -837,6 +826,24 @@ static int nvme_map_data(struct nvme_dev *dev, struct nvme_iod *iod,
 	return ret;
 }
 
+static void nvme_unmap_data(struct nvme_dev *dev, struct nvme_iod *iod)
+{
+	struct request *req = iod_get_private(iod);
+	enum dma_data_direction dma_dir = rq_data_dir(req) ?
+			DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	if (iod->nents) {
+		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
+		if (blk_integrity_rq(req)) {
+			if (!rq_data_dir(req))
+				nvme_dif_remap(req, nvme_dif_complete);
+			dma_unmap_sg(dev->dev, iod->meta_sg, 1, dma_dir);
+		}
+	}
+
+	nvme_free_iod(dev, iod);
+}
+
 /*
  * We reuse the small pool to allocate the 16-byte range here as it is not
  * worth having a special pool for these or additional cases to handle freeing