Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1576b22a authored by Jordan Crouse
Browse files

drm/msm: Finish consolidating the address space code



Now that the SMMU/IOMMU differences have been resolved the only delta
between the SMMU and the IOMMU address space implementations is the
actual address space allocation which we can work around by assuming
the caller doesn't want address generation if they specify the same
start and end address (i.e. 0).

With that optimization we can get rid of the address-space
sub-functions and a bunch of otherwise duplicated code.

Change-Id: Ic0dedbaddef0fcd3a8f39e30f95c71245d84f111
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
parent 438cdcda
Loading
Loading
Loading
Loading
+2 −11
Original line number Diff line number Diff line
@@ -25,21 +25,12 @@
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */

struct msm_gem_aspace_ops {
	int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
		struct sg_table *sgt, void *priv, unsigned int flags);

	void (*unmap)(struct msm_gem_address_space *, struct msm_gem_vma *,
		struct sg_table *sgt, void *priv);

	void (*destroy)(struct msm_gem_address_space *);
};

struct msm_gem_address_space {
	const char *name;
	struct msm_mmu *mmu;
	const struct msm_gem_aspace_ops *ops;
	struct kref kref;
	struct drm_mm mm;
	u64 va_len;
};

struct msm_gem_vma {
+59 −123
Original line number Diff line number Diff line
@@ -25,8 +25,10 @@ msm_gem_address_space_destroy(struct kref *kref)
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	if (aspace->ops->destroy)
		aspace->ops->destroy(aspace);
	if (aspace->va_len)
		drm_mm_takedown(&aspace->mm);

	aspace->mmu->funcs->destroy(aspace->mmu);

	kfree(aspace);
}
@@ -37,45 +39,9 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

/* SDE address space operations */
static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		void *priv)
{

	aspace->mmu->funcs->unmap(aspace->mmu, 0, sgt, priv);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}


static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		void *priv, unsigned int flags)
{
	int ret;

	ret = aspace->mmu->funcs->map(aspace->mmu, 0, sgt, flags, priv);
	if (!ret) {
		vma->iova = sg_dma_address(sgt->sgl);

		/* Get a reference to the aspace to keep it around */
		kref_get(&aspace->kref);
	}

	return ret;
}

static const struct msm_gem_aspace_ops smmu_aspace_ops = {
	.map = smmu_aspace_map_vma,
	.unmap = smmu_aspace_unmap_vma,
};

struct msm_gem_address_space *
msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
		const char *name)
static struct msm_gem_address_space *
msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
		uint64_t start, uint64_t end)
{
	struct msm_gem_address_space *aspace;

@@ -88,125 +54,95 @@ msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,

	aspace->name = name;
	aspace->mmu = mmu;
	aspace->ops = &smmu_aspace_ops;

	kref_init(&aspace->kref);

	return aspace;
}

/* GPU address space operations */
struct msm_iommu_aspace {
	struct msm_gem_address_space base;
	struct drm_mm mm;
};

#define to_iommu_aspace(aspace) \
	((struct msm_iommu_aspace *) \
	 container_of(aspace, struct msm_iommu_aspace, base))

static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
{
	if (!vma->iova)
		return;

	if (aspace->mmu)
		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, NULL);
	aspace->va_len = end - start;

	drm_mm_remove_node(&vma->node);
	if (aspace->va_len)
		drm_mm_init(&aspace->mm, (start >> PAGE_SHIFT),
			(end >> PAGE_SHIFT) - 1);

	vma->iova = 0;
	kref_init(&aspace->kref);

	msm_gem_address_space_put(aspace);
	return aspace;
}

static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt, void *priv,
		unsigned int flags)
static int allocate_iova(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		u64 *iova)
{
	struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
	size_t size = 0;
	struct scatterlist *sg;
	size_t size = 0;
	int ret, i;

	if (!aspace->va_len)
		return 0;

	if (WARN_ON(drm_mm_node_allocated(&vma->node)))
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		size += sg->length + sg->offset;

	ret = drm_mm_insert_node(&local->mm, &vma->node, size >> PAGE_SHIFT,
	ret = drm_mm_insert_node(&aspace->mm, &vma->node, size >> PAGE_SHIFT,
			0, DRM_MM_SEARCH_DEFAULT);
	if (ret)
		return ret;

	vma->iova = vma->node.start << PAGE_SHIFT;

	if (aspace->mmu)
		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
			flags, NULL);

	/* Get a reference to the aspace to keep it around */
	kref_get(&aspace->kref);
	if (!ret && iova)
		*iova = vma->node.start << PAGE_SHIFT;

	return ret;
}

static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		void *priv, unsigned int flags)
{
	struct msm_iommu_aspace *local = to_iommu_aspace(aspace);

	drm_mm_takedown(&local->mm);
	aspace->mmu->funcs->destroy(aspace->mmu);
}

static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
	.map = iommu_aspace_map_vma,
	.unmap = iommu_aspace_unmap_vma,
	.destroy = iommu_aspace_destroy,
};
	u64 iova = 0;
	int ret;

static struct msm_gem_address_space *
msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
		uint64_t start, uint64_t end)
{
	struct msm_iommu_aspace *local;
	if (!aspace)
		return -EINVAL;

	if (!mmu)
		return ERR_PTR(-EINVAL);
	ret = allocate_iova(aspace, vma, sgt, &iova);
	if (ret)
		return ret;

	local = kzalloc(sizeof(*local), GFP_KERNEL);
	if (!local)
		return ERR_PTR(-ENOMEM);
	ret = aspace->mmu->funcs->map(aspace->mmu, iova, sgt,
		flags, priv);

	drm_mm_init(&local->mm, (start >> PAGE_SHIFT),
		(end >> PAGE_SHIFT) - 1);
	if (ret) {
		if (drm_mm_node_allocated(&vma->node))
			drm_mm_remove_node(&vma->node);

	local->base.name = name;
	local->base.mmu = mmu;
	local->base.ops = &msm_iommu_aspace_ops;
		return ret;
	}

	kref_init(&local->base.kref);
	vma->iova = sg_dma_address(sgt->sgl);
	kref_get(&aspace->kref);

	return &local->base;
	return 0;
}

int msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		void *priv, unsigned int flags)
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
{
	if (aspace && aspace->ops->map)
		return aspace->ops->map(aspace, vma, sgt, priv, flags);
	if (!aspace || !vma->iova)
		return;

	return -EINVAL;
	aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, priv);

	if (drm_mm_node_allocated(&vma->node))
		drm_mm_remove_node(&vma->node);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
struct msm_gem_address_space *
msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
		const char *name)
{
	if (aspace && aspace->ops->unmap)
		aspace->ops->unmap(aspace, vma, sgt, priv);
	return msm_gem_address_space_new(mmu, name, 0, 0);
}

struct msm_gem_address_space *