Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 82fbc52e authored by Jordan Crouse
Browse files

drm/msm: Consolidate mmu->map and mmu->map_sg



For all intents and purposes the mmu->map function has used a
scatter gather list for some time. Drop the pretense and just
make both the SMMU and IOMMU flavors use the sg flavor of their
respective iommu API functions. As a result we can drop the
map_sg hooks in the SMMU driver and get rid of a considerable
amount of re-invented wheels in the IOMMU driver.

Change-Id: Ic0dedbadc4724c8ae389892fb85610435c5c08cf
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
parent 7af30a44
Loading
Loading
Loading
Loading
+2 −4
Original line number Diff line number Diff line
@@ -48,8 +48,7 @@ static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
		aspace->mmu->funcs->unmap_dma_buf(aspace->mmu,
			sgt, buf, DMA_BIDIRECTIONAL);
	else
		aspace->mmu->funcs->unmap_sg(aspace->mmu, sgt,
			DMA_BIDIRECTIONAL);
		aspace->mmu->funcs->unmap(aspace->mmu, 0, sgt);

	vma->iova = 0;

@@ -68,8 +67,7 @@ static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
		ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt, buf,
			DMA_BIDIRECTIONAL);
	else
		ret = aspace->mmu->funcs->map_sg(aspace->mmu, sgt,
			DMA_BIDIRECTIONAL);
		ret = aspace->mmu->funcs->map(aspace->mmu, 0, sgt, flags);

	if (!ret)
		vma->iova = sg_dma_address(sgt->sgl);
+14 −43
Original line number Diff line number Diff line
@@ -200,9 +200,6 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	uint64_t da = iova;
	unsigned int i, j;
	int ret;
	u32 prot = IOMMU_READ;

@@ -218,57 +215,31 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
	if ((flags & MSM_BO_CACHED) && msm_iommu_coherent(mmu))
		prot |= IOMMU_CACHE;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		phys_addr_t pa = sg_phys(sg) - sg->offset;
		size_t bytes = sg->length + sg->offset;

		VERB("map[%d]: %016llx %pa(%zx)", i, iova, &pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
	/* iommu_map_sg returns the number of bytes mapped */
	ret =  iommu_map_sg(domain, iova, sgt->sgl, sgt->nents, prot);
	if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;
		sgt->sgl->dma_address = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length + sg->offset;
		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
	return ret ? 0 : -ENOMEM;
}

static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
static void msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	uint64_t da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;
		size_t unmapped;
	size_t len = 0;
	int ret, i;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			return unmapped;
	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		len += sg->length;

		VERB("unmap[%d]: %016llx(%zx)", i, iova, bytes);
	ret = iommu_unmap(domain, iova, len);
	if (ret != len)
		dev_warn(mmu->dev, "could not unmap iova %llx\n", iova);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}

	return 0;
	sgt->sgl->dma_address = 0;
}

static void msm_iommu_destroy(struct msm_mmu *mmu)
+1 −5
Original line number Diff line number Diff line
@@ -35,11 +35,7 @@ struct msm_mmu_funcs {
	void (*detach)(struct msm_mmu *mmu);
	int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
			u32 flags);
	int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt);
	int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
			enum dma_data_direction dir);
	void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
		enum dma_data_direction dir);
	void (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt);
	int (*map_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
			struct dma_buf *dma_buf, int dir);
	void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
+5 −89
Original line number Diff line number Diff line
@@ -109,103 +109,21 @@ static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	struct iommu_domain *domain;
	struct scatterlist *sg;
	uint64_t da = iova;
	unsigned int i, j;
	int ret;

	if (!client)
		return -ENODEV;

	domain = client->mmu_mapping->domain;
	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_phys(sg) - sg->offset;
		size_t bytes = sg->length + sg->offset;

		VERB("map[%d]: %16llx %08x(%zx)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes,
			IOMMU_READ | IOMMU_WRITE);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;
	ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents,
			DMA_BIDIRECTIONAL);

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length + sg->offset;

		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
	return (ret != sgt->nents) ? -ENOMEM : 0;
}

/*
 * Map a scatter/gather table through the SMMU client's DMA device.
 *
 * @mmu: wrapper around the SMMU instance
 * @sgt: scatter/gather table describing the buffer pages
 * @dir: DMA transfer direction for the mapping
 *
 * Returns 0 on success, or -ENOMEM if dma_map_sg() could not map
 * every entry in the table.
 */
static int msm_smmu_map_sg(struct msm_mmu *mmu, struct sg_table *sgt,
		enum dma_data_direction dir)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	int mapped;

	/*
	 * dma_map_sg() returns the number of entries actually mapped;
	 * anything short of the full table counts as failure.
	 */
	mapped = dma_map_sg(client->dev, sgt->sgl, sgt->nents, dir);

	return (mapped == sgt->nents) ? 0 : -ENOMEM;
}

/*
 * Tear down a DMA mapping previously created by msm_smmu_map_sg().
 *
 * @mmu: wrapper around the SMMU instance
 * @sgt: scatter/gather table that was mapped
 * @dir: DMA transfer direction the mapping was created with
 */
static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt,
		enum dma_data_direction dir)
{
	struct msm_smmu_client *client =
		msm_smmu_to_client(to_msm_smmu(mmu));

	dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
}

static int msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
static void msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	struct iommu_domain *domain;
	struct scatterlist *sg;
	uint64_t da = iova;
	int i;

	if (!client)
		return -ENODEV;

	domain = client->mmu_mapping->domain;
	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			return unmapped;

		VERB("unmap[%d]: %16llx(%zx)", i, iova, bytes);

		WARN_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}

	return 0;
	dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void msm_smmu_destroy(struct msm_mmu *mmu)
@@ -249,8 +167,6 @@ static const struct msm_mmu_funcs funcs = {
	.attach = msm_smmu_attach,
	.detach = msm_smmu_detach,
	.map = msm_smmu_map,
	.map_sg = msm_smmu_map_sg,
	.unmap_sg = msm_smmu_unmap_sg,
	.unmap = msm_smmu_unmap,
	.map_dma_buf = msm_smmu_map_dma_buf,
	.unmap_dma_buf = msm_smmu_unmap_dma_buf,