Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f57233cd authored by Shubhraprakash Das
Browse files

iommu: msm: Ensure secure buffer mapping at 1MB



Secure buffer mappings should be 1MB aligned. Add checks
for this.

CRs-Fixed: 755014
Change-Id: I377e7beaeb21cce9a828590bab20cc1cf41fd8c6
Signed-off-by: Shubhraprakash Das <sadas@codeaurora.org>
parent 8429a262
Loading
Loading
Loading
Loading
+27 −1
Original line number Diff line number Diff line
@@ -519,6 +519,9 @@ static int msm_iommu_sec_ptbl_map(struct msm_iommu_drvdata *iommu_drvdata,
	void *flush_va, *flush_va_end;
	int ret = 0;

	if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M) ||
		!IS_ALIGNED(pa, SZ_1M))
		return -EINVAL;
	map.plist.list = virt_to_phys(&pa);
	map.plist.list_size = 1;
	map.plist.size = len;
@@ -568,22 +571,36 @@ static int msm_iommu_sec_ptbl_map_range(struct msm_iommu_drvdata *iommu_drvdata,
	unsigned int offset = 0, chunk_offset = 0;
	int ret;

	if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
		return -EINVAL;

	map.info.id = iommu_drvdata->sec_id;
	map.info.ctx_id = ctx_drvdata->num;
	map.info.va = va;
	map.info.size = len;

	if (sg->length == len) {
		/*
		 * physical address for secure mapping needs
		 * to be 1MB aligned
		 */
		pa = get_phys_addr(sg);
		if (!IS_ALIGNED(pa, SZ_1M))
			return -EINVAL;
		map.plist.list = virt_to_phys(&pa);
		map.plist.list_size = 1;
		map.plist.size = len;
		flush_va = &pa;
	} else {
		sgiter = sg;
		if (!IS_ALIGNED(sgiter->length, SZ_1M))
			return -EINVAL;
		cnt = sg->length / SZ_1M;
		while ((sgiter = sg_next(sgiter)))
		while ((sgiter = sg_next(sgiter))) {
			if (!IS_ALIGNED(sgiter->length, SZ_1M))
				return -EINVAL;
			cnt += sgiter->length / SZ_1M;
		}

		pa_list = kmalloc(cnt * sizeof(*pa_list), GFP_KERNEL);
		if (!pa_list)
@@ -592,6 +609,10 @@ static int msm_iommu_sec_ptbl_map_range(struct msm_iommu_drvdata *iommu_drvdata,
		sgiter = sg;
		cnt = 0;
		pa = get_phys_addr(sgiter);
		if (!IS_ALIGNED(pa, SZ_1M)) {
			kfree(pa_list);
			return -EINVAL;
		}
		while (offset < len) {
			pa += chunk_offset;
			pa_list[cnt] = pa;
@@ -637,6 +658,8 @@ static int msm_iommu_sec_ptbl_unmap(struct msm_iommu_drvdata *iommu_drvdata,
	int ret, scm_ret;
	struct scm_desc desc = {0};

	if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
		return -EINVAL;
	desc.args[0] = unmap.info.id = iommu_drvdata->sec_id;
	desc.args[1] = unmap.info.ctx_id = ctx_drvdata->num;
	desc.args[2] = unmap.info.va = va;
@@ -877,6 +900,9 @@ static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = -EINVAL;

	if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
		return -EINVAL;

	iommu_access_ops->iommu_lock_acquire(0);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);