Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 313991e7 authored by Charan Teja Reddy, committed by Gerrit - the friendly Code Review server
Browse files

iommu/arm-smmu: msm map/unmap calls for slave side secure targets



On slave side access control based targets, TZ is responsible to
prepare and program page table mapping for secure context banks.
Implement separate map/unmap calls accordingly.

Change-Id: Icb366067b39ce6eaae204c42a067d2a1fc4b8cf0
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
parent 65ca63e7
Loading
Loading
Loading
Loading
+69 −0
Original line number Diff line number Diff line
@@ -595,6 +595,16 @@ static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain);
static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);

/*
 * Forward declarations for the slave-side secure map/unmap helpers.
 * On slave-side access-control targets, TZ owns the secure context-bank
 * page tables, so arm_smmu_map()/unmap()/map_sg() divert to these paths.
 */
static int msm_secure_smmu_map(struct iommu_domain *domain, unsigned long iova,
			       phys_addr_t paddr, size_t size, int prot);
static size_t msm_secure_smmu_unmap(struct iommu_domain *domain,
				    unsigned long iova,
				    size_t size);
static size_t msm_secure_smmu_map_sg(struct iommu_domain *domain,
				     unsigned long iova,
				     struct scatterlist *sg,
				     unsigned int nents, int prot);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
@@ -2596,6 +2606,9 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
	if (!ops)
		return -ENODEV;

	if (arm_smmu_is_slave_side_secure(smmu_domain))
		return msm_secure_smmu_map(domain, iova, paddr, size, prot);

	arm_smmu_secure_domain_lock(smmu_domain);

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
@@ -2636,6 +2649,9 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
	if (!ops)
		return 0;

	if (arm_smmu_is_slave_side_secure(smmu_domain))
		return msm_secure_smmu_unmap(domain, iova, size);

	ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
	if (ret)
		return ret;
@@ -2676,6 +2692,9 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
	if (!ops)
		return -ENODEV;

	if (arm_smmu_is_slave_side_secure(smmu_domain))
		return msm_secure_smmu_map_sg(domain, iova, sg, nents, prot);

	arm_smmu_prealloc_memory(smmu_domain, sg, nents, &nonsecure_pool);
	arm_smmu_secure_domain_lock(smmu_domain);

@@ -2881,6 +2900,56 @@ bool arm_smmu_skip_write(void __iomem *addr)

	return false;
}

/*
 * msm_secure_smmu_map() - map one physical range on a slave-side secure
 * domain, where TZ programs the page tables via the MSM secure pgtable ops.
 *
 * Returns 0 on success or a negative errno from the pgtable ops.
 *
 * Fix: ret was declared size_t in this int-returning function, so a
 * negative errno from ops->map() was laundered through an unsigned type
 * before being returned; use int to match both ops->map() and our return.
 */
static int msm_secure_smmu_map(struct iommu_domain *domain, unsigned long iova,
			       phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	ret = ops->map(ops, iova, paddr, size, prot);

	return ret;
}

/*
 * msm_secure_smmu_unmap() - unmap a range on a slave-side secure domain.
 *
 * Returns the number of bytes unmapped, or 0 on failure (the iommu unmap
 * convention: size_t returns cannot carry negative errnos).
 *
 * Fix: ret was a single size_t reused for arm_smmu_domain_power_on(),
 * which returns a negative errno; "return ret" then handed the caller a
 * huge positive value that arm_smmu_unmap() would report as bytes
 * unmapped. Keep the power-on status in an int and return 0 on failure.
 */
static size_t msm_secure_smmu_unmap(struct iommu_domain *domain,
				    unsigned long iova,
				    size_t size)
{
	size_t ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (arm_smmu_domain_power_on(domain, smmu_domain->smmu))
		return 0;

	ret = ops->unmap(ops, iova, size);

	arm_smmu_domain_power_off(domain, smmu_domain->smmu);

	return ret;
}

/*
 * msm_secure_smmu_map_sg() - map a scatterlist on a slave-side secure
 * domain via the MSM secure pgtable ops (TZ owns the page tables).
 *
 * On failure (ops->map_sg() returning 0), any partially created mapping
 * is torn down with msm_secure_smmu_unmap() over the bytes reported in
 * 'size' before the result is propagated to the caller.
 *
 * NOTE(review): ret is int but this function returns size_t; presumably
 * ops->map_sg() here returns the mapped length (or 0/-errno on failure)
 * — confirm against the MSM io_pgtable map_sg implementation, since a
 * negative value would be converted to a huge size_t on return.
 */
static size_t msm_secure_smmu_map_sg(struct iommu_domain *domain,
				     unsigned long iova,
				     struct scatterlist *sg,
				     unsigned int nents, int prot)
{
	int ret;
	size_t size;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	/* 'size' is filled with the number of bytes actually mapped */
	ret = ops->map_sg(ops, iova, sg, nents, prot, &size);

	/* zero return means failure: undo whatever part got mapped */
	if (!ret)
		msm_secure_smmu_unmap(domain, iova, size);

	return ret;
}

#endif

static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
+15 −5
Original line number Diff line number Diff line
@@ -41,6 +41,8 @@

struct msm_secure_io_pgtable {
	struct io_pgtable iop;
	/* lock required while operating on page tables */
	struct mutex pgtbl_lock;
};

int msm_iommu_sec_pgtbl_init(void)
@@ -133,6 +135,7 @@ static int msm_secure_map(struct io_pgtable_ops *ops, unsigned long iova,
	flush_va_end = (void *)
		(((unsigned long) flush_va) + sizeof(phys_addr_t));

	mutex_lock(&data->pgtbl_lock);
	/*
	 * Ensure that the buffer is in RAM by the time it gets to TZ
	 */
@@ -142,10 +145,11 @@ static int msm_secure_map(struct io_pgtable_ops *ops, unsigned long iova,
				SCM_VAL, SCM_VAL, SCM_VAL);

	if (is_scm_armv8()) {
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_MP,
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_MAP2_FLAT), &desc);
		resp = desc.ret[0];
	}
	mutex_unlock(&data->pgtbl_lock);

	if (ret || resp)
		return -EINVAL;
@@ -258,10 +262,12 @@ static int msm_secure_map_sg(struct io_pgtable_ops *ops, unsigned long iova,

	flush_va_end = (void *) (((unsigned long) flush_va) +
			(cnt * sizeof(*pa_list)));

	mutex_lock(&data->pgtbl_lock);
	dmac_clean_range(flush_va, flush_va_end);

	if (is_scm_armv8()) {
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_MP,
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_MAP2_FLAT), &desc);
		resp = desc.ret[0];

@@ -270,6 +276,7 @@ static int msm_secure_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
		else
			ret = len;
	}
	mutex_unlock(&data->pgtbl_lock);

	kfree(pa_list);
	return ret;
@@ -293,13 +300,15 @@ static size_t msm_secure_unmap(struct io_pgtable_ops *ops, unsigned long iova,
	desc.args[4] = IOMMU_TLBINVAL_FLAG;
	desc.arginfo = SCM_ARGS(5);

	mutex_lock(&data->pgtbl_lock);
	if (is_scm_armv8()) {
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_MP,
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_UNMAP2_FLAT), &desc);

		if (!ret)
			ret = len;
	}
	mutex_unlock(&data->pgtbl_lock);
	return ret;
}

@@ -324,6 +333,7 @@ msm_secure_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
		.unmap		= msm_secure_unmap,
		.iova_to_phys	= msm_secure_iova_to_phys,
	};
	mutex_init(&data->pgtbl_lock);

	return data;
}
+23 −0
Original line number Diff line number Diff line
@@ -102,6 +102,29 @@ static inline int register_iommu_sec_ptbl(void)
{
	return -EINVAL;
}

/*
 * Stub for !CONFIG_MSM_TZ_SMMU builds.
 *
 * Fix: returning -EINVAL from a size_t function yields a huge positive
 * value that callers would treat as bytes successfully unmapped; the
 * iommu unmap convention signals failure with 0.
 */
static inline size_t msm_secure_smmu_unmap(struct iommu_domain *domain,
					   unsigned long iova,
					   size_t size)
{
	return 0;
}

/*
 * Stub for !CONFIG_MSM_TZ_SMMU builds.
 *
 * Fix: -EINVAL converted to size_t is a huge positive value that reads
 * as a successful mapped length; return 0 to signal "nothing mapped".
 */
static inline size_t msm_secure_smmu_map_sg(struct iommu_domain *domain,
					    unsigned long iova,
					    struct scatterlist *sg,
					    unsigned int nents, int prot)
{
	return 0;
}

/*
 * Stub for !CONFIG_MSM_TZ_SMMU builds: slave-side secure mapping is not
 * available, so report an error (int return can carry a negative errno).
 */
static inline int msm_secure_smmu_map(struct iommu_domain *domain,
				      unsigned long iova,
				      phys_addr_t paddr, size_t size, int prot)
{
	return -EINVAL;
}

#endif /* CONFIG_MSM_TZ_SMMU */

#endif /* __MSM_TZ_SMMU_H__ */