
Commit 4aedb705 authored by Christoph Hellwig

nvme-pci: split metadata handling from nvme_map_data / nvme_unmap_data

This prepares for some bigger changes to the data mapping helpers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
parent 783b94bd
drivers/nvme/host/pci.c  +27 −21
@@ -592,11 +592,6 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
 	int i;
 
-	if (blk_integrity_rq(req)) {
-		dma_unmap_page(dev->dev, iod->meta_dma,
-				rq_integrity_vec(req)->bv_len, dma_dir);
-	}
-
 	if (iod->nents) {
 		/* P2PDMA requests do not need to be unmapped */
 		if (!is_pci_p2pdma_page(sg_page(iod->sg)))
@@ -858,24 +853,23 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
 	else
 		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-
+out:
 	if (ret != BLK_STS_OK)
-		goto out;
-
-	ret = BLK_STS_IOERR;
-	if (blk_integrity_rq(req)) {
-		iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
-				dma_dir, 0);
-		if (dma_mapping_error(dev->dev, iod->meta_dma))
-			goto out;
-		cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
-	}
-
-	return BLK_STS_OK;
-
-out:
-	nvme_unmap_data(dev, req);
-	return ret;
+		nvme_unmap_data(dev, req);
+	return ret;
+}
+
+static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
+		struct nvme_command *cmnd)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+	iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
+			rq_dma_dir(req), 0);
+	if (dma_mapping_error(dev->dev, iod->meta_dma))
+		return BLK_STS_IOERR;
+	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
+	return 0;
 }
 
 /*
@@ -913,9 +907,17 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 			goto out_free_cmd;
 	}
 
+	if (blk_integrity_rq(req)) {
+		ret = nvme_map_metadata(dev, req, &cmnd);
+		if (ret)
+			goto out_unmap_data;
+	}
+
 	blk_mq_start_request(req);
 	nvme_submit_cmd(nvmeq, &cmnd, bd->last);
 	return BLK_STS_OK;
+out_unmap_data:
+	nvme_unmap_data(dev, req);
 out_free_cmd:
 	nvme_cleanup_cmd(req);
 	return ret;
@@ -924,10 +926,14 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 static void nvme_pci_complete_rq(struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	struct nvme_dev *dev = iod->nvmeq->dev;
 
 	nvme_cleanup_cmd(req);
+	if (blk_integrity_rq(req))
+		dma_unmap_page(dev->dev, iod->meta_dma,
+			       rq_integrity_vec(req)->bv_len, rq_data_dir(req));
 	if (blk_rq_nr_phys_segments(req))
-		nvme_unmap_data(iod->nvmeq->dev, req);
+		nvme_unmap_data(dev, req);
 	nvme_complete_rq(req);
 }
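
For orientation, a condensed sketch of the request flow after this patch, stitched together from the hunks above; declarations, error labels, and surrounding context are elided, so this is a reading aid rather than a compilable excerpt:

	/* Submission path, nvme_queue_rq(): data and metadata now mapped separately. */
	if (blk_rq_nr_phys_segments(req)) {
		ret = nvme_map_data(dev, req, &cmnd);	/* data payload only */
		if (ret)
			goto out_free_cmd;
	}
	if (blk_integrity_rq(req)) {
		ret = nvme_map_metadata(dev, req, &cmnd);	/* new helper */
		if (ret)
			goto out_unmap_data;	/* undo the data mapping on failure */
	}
	blk_mq_start_request(req);
	nvme_submit_cmd(nvmeq, &cmnd, bd->last);

	/* Completion path, nvme_pci_complete_rq(): unmap metadata, then data. */
	nvme_cleanup_cmd(req);
	if (blk_integrity_rq(req))
		dma_unmap_page(dev->dev, iod->meta_dma,
			       rq_integrity_vec(req)->bv_len, rq_data_dir(req));
	if (blk_rq_nr_phys_segments(req))
		nvme_unmap_data(dev, req);
	nvme_complete_rq(req);

With the metadata unmap moved into the completion handler, nvme_map_data and nvme_unmap_data touch only the data payload, which is what the commit message means by preparing for bigger changes to the data mapping helpers.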