Commit b9bf435a authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server

Merge "iommu/arm-smmu: pass CB index for smmu programming by TZ"

parents 6572fa0b 99e7cd32
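In short: this merge targets systems where TrustZone (TZ), not HLOS, programs the SMMU. The context-bank (CB) index is now passed through the SCM interface so TZ can program the right bank, and a slave-side-secure io-pgtable backend is added for such banks. A minimal sketch of the call pattern, using names from the diff below (the helper name is illustrative; error handling trimmed):

#include <soc/qcom/scm.h>
#include <soc/qcom/msm_tz_smmu.h>

/* Sketch: ask TZ to program the secure configuration of one context
 * bank. sec_id identifies the SMMU instance to TZ; cb is the
 * context-bank index this change plumbs through. */
static int example_restore_cb(enum tz_smmu_device_id sec_id, u32 cb)
{
	int scm_ret = 0;
	int ret = scm_restore_sec_cfg(sec_id, cb, &scm_ret);

	return (ret || scm_ret) ? -EINVAL : 0;
}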
+3 −0  (arm-smmu devicetree bindings documentation)
@@ -74,6 +74,9 @@ conditions.
                  address size faults are due to a fundamental programming
                  error from which we don't care about recovering anyways.

- qcom,tz-device-id : A string indicating the device ID for this SMMU known
		  to TZ.  See msm_tz_smmu.c for a full list of mappings.

- qcom,skip-init : Disable resetting configuration for all context banks
                  during device reset.  This is useful for targets where
                  some context banks are dedicated to other execution
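A hedged sketch of how a driver could read the new property; the real string-to-ID mapping lives in msm_tz_smmu.c (msm_dev_to_device_id() in the driver diff below), so the helper here is illustrative only:

#include <linux/of.h>

/* Illustrative: fetch the qcom,tz-device-id string; the driver then
 * resolves it to an enum tz_smmu_device_id via msm_dev_to_device_id(). */
static int example_read_tz_device_id(struct device_node *np,
				     const char **name)
{
	return of_property_read_string(np, "qcom,tz-device-id", name);
}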
+1 −0  (drivers/iommu/Makefile)
@@ -8,6 +8,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_MSM_TZ_SMMU) += io-pgtable-msm-secure.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_FAST) += io-pgtable-fast.o dma-mapping-fast.o
obj-$(CONFIG_OF_IOMMU)	+= of_iommu.o
obj-$(CONFIG_IOMMU_DEBUG) += iommu-debug.o
+1 −0  (drivers/iommu/arm-smmu-regs.h)
@@ -109,6 +109,7 @@
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7FFF
#define SID_MASK			0x7FFF
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
+174 −19  (drivers/iommu/arm-smmu.c)
@@ -57,6 +57,8 @@

#include <linux/amba/bus.h>

#include <soc/qcom/msm_tz_smmu.h>
#include <soc/qcom/scm.h>
#include "io-pgtable.h"
#include "arm-smmu-regs.h"

@@ -225,6 +227,7 @@ struct arm_smmu_device {
	void __iomem			*base;
	void __iomem			*cb_base;
	unsigned long			size;
	phys_addr_t			phys_addr;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
@@ -298,6 +301,8 @@ struct arm_smmu_device {

	struct arm_smmu_arch_ops	*arch_ops;
	void				*archdata;

	enum tz_smmu_device_id		sec_id;
};

enum arm_smmu_context_fmt {
@@ -355,6 +360,7 @@ struct arm_smmu_domain {
	spinlock_t			cb_lock; /* Serialises ATS1* ops */
	struct io_pgtable_cfg		pgtbl_cfg;
	u32 attributes;
	bool				slave_side_secure;
	u32				secure_vmid;
	struct list_head		pte_info_list;
	struct list_head		unassign_list;
@@ -405,6 +411,8 @@ static int arm_smmu_alloc_cb(struct iommu_domain *domain,
				struct device *dev);

static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain);
static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
@@ -432,6 +440,22 @@ static bool is_dynamic_domain(struct iommu_domain *domain)
	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

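/*
 * On static-CB targets TZ owns the SMMU's secure configuration; ask it,
 * via the IOMMU_SECURE_CFG SCM call, to restore the configuration for
 * context bank @cb. A no-op on all other targets.
 */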
static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu, u32 cb)
{
	int ret;
	int scm_ret = 0;

	if (!arm_smmu_is_static_cb(smmu))
		return 0;

	ret = scm_restore_sec_cfg(smmu->sec_id, cb, &scm_ret);
	if (ret || scm_ret) {
		pr_err("scm call IOMMU_SECURE_CFG failed\n");
		return -EINVAL;
	}

	return 0;
}
static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
{
	if (smmu_domain->attributes &
@@ -448,20 +472,32 @@ static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu)
	return smmu->options & ARM_SMMU_OPT_STATIC_CB;
}

-static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
+static bool arm_smmu_has_secure_vmid(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain)
{
	return arm_smmu_has_secure_vmid(smmu_domain) &&
			smmu_domain->slave_side_secure;
}

static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain)
{
	return arm_smmu_has_secure_vmid(smmu_domain)
			&& !smmu_domain->slave_side_secure;
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
-	if (arm_smmu_is_domain_secure(smmu_domain))
+	if (arm_smmu_is_master_side_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
-	if (arm_smmu_is_domain_secure(smmu_domain))
+	if (arm_smmu_is_master_side_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

@@ -1109,7 +1145,7 @@ static void *arm_smmu_alloc_pages_exact(void *cookie,
	void *page;
	struct arm_smmu_domain *smmu_domain = cookie;

-	if (!arm_smmu_is_domain_secure(smmu_domain))
+	if (!arm_smmu_is_master_side_secure(smmu_domain))
		return alloc_pages_exact(size, gfp_mask);

	page = arm_smmu_secure_pool_remove(smmu_domain, size);
@@ -1132,7 +1168,7 @@ static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
{
	struct arm_smmu_domain *smmu_domain = cookie;

-	if (!arm_smmu_is_domain_secure(smmu_domain)) {
+	if (!arm_smmu_is_master_side_secure(smmu_domain)) {
		free_pages_exact(virt, size);
		return;
	}
@@ -1358,6 +1394,22 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
	return IRQ_HANDLED;
}

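/*
 * On TZ-owned (static CB) SMMUs the kernel cannot program the context
 * bank's pagetable format itself, so for SMMUv2, AArch64-format,
 * non-secure-VMID domains it asks TZ to set it via msm_tz_set_cb_format().
 */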
static int arm_smmu_set_pt_format(struct arm_smmu_domain *smmu_domain,
				  struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret = 0;

	if ((smmu->version > ARM_SMMU_V1) &&
	    (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) &&
	    !arm_smmu_has_secure_vmid(smmu_domain) &&
	    arm_smmu_is_static_cb(smmu)) {
		ret = msm_tz_set_cb_format(smmu->sec_id, cfg->cbndx);
	}
	return ret;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
@@ -1707,6 +1759,18 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
		cfg->irptndx = cfg->cbndx;
	}

	if (arm_smmu_is_slave_side_secure(smmu_domain)) {
		smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
			.quirks         = quirks,
			.pgsize_bitmap  = smmu->pgsize_bitmap,
			.arm_msm_secure_cfg = {
				.sec_id = smmu->sec_id,
				.cbndx = cfg->cbndx,
			},
			.iommu_dev      = smmu->dev,
		};
		fmt = ARM_MSM_SECURE;
	} else {
		smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
			.quirks		= quirks,
			.pgsize_bitmap	= smmu->pgsize_bitmap,
@@ -1715,6 +1779,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
			.tlb		= tlb_ops,
			.iommu_dev	= smmu->dev,
		};
	}

	smmu_domain->smmu = smmu;
	smmu_domain->dev = dev;
@@ -1752,6 +1817,15 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,

		arm_smmu_arch_init_context_bank(smmu_domain, dev);

		/* for slave side secure, we may have to force the pagetable
		 * format to V8L.
		 */
		ret = arm_smmu_set_pt_format(smmu_domain,
					     &smmu_domain->pgtbl_cfg);
		if (ret)
			goto out_clear_smmu;
		/*
		 * Request context fault interrupt. Do this last to avoid the
		 * handler seeing a half-initialised domain state.
@@ -1940,6 +2014,15 @@ static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
	if (!smmu->smrs)
		return;

	/* For slave side secure targets, as we can't write to the
	 * global space, set the sme mask values to default.
	 */
	if (arm_smmu_is_static_cb(smmu)) {
		smmu->streamid_mask = SID_MASK;
		smmu->smr_mask_mask = SMR_MASK_MASK;
		return;
	}

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
	size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
@@ -2113,6 +2196,8 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
	const struct iommu_gather_ops *tlb;

	tlb = smmu_domain->pgtbl_cfg.tlb;
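	/*
	 * Slave-side secure domains use the ARM_MSM_SECURE pgtable format,
	 * which installs no TLB ops (TZ maintains the tables), so there is
	 * nothing to invalidate here.
	 */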
	if (!tlb)
		return;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
@@ -2194,7 +2279,7 @@ static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
	int source_vmid = VMID_HLOS;
	struct arm_smmu_pte_info *pte_info, *temp;

-	if (!arm_smmu_is_domain_secure(smmu_domain))
+	if (!arm_smmu_is_master_side_secure(smmu_domain))
		return ret;

	list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
@@ -2221,7 +2306,7 @@ static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
	int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
	struct arm_smmu_pte_info *pte_info, *temp;

-	if (!arm_smmu_is_domain_secure(smmu_domain))
+	if (!arm_smmu_is_master_side_secure(smmu_domain))
		return;

	list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
@@ -2245,8 +2330,14 @@ static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_pte_info *pte_info;

-	if (WARN_ON(!arm_smmu_is_domain_secure(smmu_domain)))
+	if (smmu_domain->slave_side_secure ||
+	    !arm_smmu_has_secure_vmid(smmu_domain)) {
+		if (smmu_domain->slave_side_secure)
+			WARN(1, "slave side secure is enforced\n");
+		else
+			WARN(1, "Invalid VMID is set !!\n");
+		return;
+	}

	pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
	if (!pte_info)
@@ -2262,6 +2353,15 @@ static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_pte_info *pte_info;

	if (smmu_domain->slave_side_secure ||
	    !arm_smmu_has_secure_vmid(smmu_domain)) {
		if (smmu_domain->slave_side_secure)
			WARN(1, "slave side secure is enforced\n");
		else
			WARN(1, "Invalid VMID is set !!\n");
		return -EINVAL;
	}

	pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
	if (!pte_info)
		return -ENOMEM;
@@ -3337,7 +3437,20 @@ static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)

	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;

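	/*
	 * On static-CB targets HLOS may not write this implementation-
	 * defined register directly; compute its physical address from the
	 * ioremapped offset and have TZ perform the write via scm_io_write().
	 */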
	if (arm_smmu_is_static_cb(smmu)) {
		phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
							smmu->phys_addr;

		if (scm_io_write(impl_def1_base_phys +
					IMPL_DEF1_MICRO_MMU_CTRL, reg)) {
			dev_err(smmu->dev,
				"scm_io_write fail. SMMU might not be halted");
			return -EINVAL;
		}
	} else {
		writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	}

	return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
}
@@ -3359,8 +3472,19 @@ static void qsmmuv2_resume(struct arm_smmu_device *smmu)

	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;

	if (arm_smmu_is_static_cb(smmu)) {
		phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
							smmu->phys_addr;

		if (scm_io_write(impl_def1_base_phys +
				IMPL_DEF1_MICRO_MMU_CTRL, reg))
			dev_err(smmu->dev,
				"scm_io_write fail. SMMU might not be resumed");
	} else {
		writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	}
}

static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
{
@@ -3601,6 +3725,9 @@ static int arm_smmu_alloc_cb(struct iommu_domain *domain,
			cb = smmu->s2crs[idx].cbndx;
	}

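	/*
	 * A TZ pre-programmed (handed-off) context bank on a static-CB
	 * target means the domain is managed slave-side: TZ, not HLOS,
	 * writes the pagetables.
	 */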
	if (cb >= 0 && arm_smmu_is_static_cb(smmu))
		smmu_domain->slave_side_secure = true;

	if (cb < 0 && !arm_smmu_is_static_cb(smmu)) {
		mutex_unlock(&smmu->stream_map_mutex);
		return __arm_smmu_alloc_bitmap(smmu->context_map,
@@ -3610,6 +3737,7 @@ static int arm_smmu_alloc_cb(struct iommu_domain *domain,

	for (i = 0; i < smmu->num_mapping_groups; i++) {
		if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
			if (!arm_smmu_is_static_cb(smmu))
				smmu->s2crs[i].cb_handoff = false;
			smmu->s2crs[i].count -= 1;
		}
@@ -3862,6 +3990,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
	int i;

	if (arm_smmu_restore_sec_cfg(smmu, 0))
		return -ENODEV;

	dev_dbg(smmu->dev, "probing hardware configuration...\n");
	dev_dbg(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);
@@ -4101,6 +4232,24 @@ static const struct of_device_id arm_smmu_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

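/*
 * If any available SMMU in the system is TZ-managed (i.e. carries the
 * new qcom,tz-device-id property), set up the secure pagetable memory
 * with TZ before the driver starts using it.
 */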
#ifdef CONFIG_MSM_TZ_SMMU
int register_iommu_sec_ptbl(void)
{
	struct device_node *np;

	for_each_matching_node(np, arm_smmu_of_match)
		if (of_find_property(np, "qcom,tz-device-id", NULL) &&
				of_device_is_available(np))
			break;
	if (!np)
		return -ENODEV;

	of_node_put(np);

	return msm_iommu_sec_pgtbl_init();
}
#endif

#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
@@ -4225,6 +4374,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		smmu->phys_addr = res->start;
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
@@ -4277,6 +4428,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
	if (err)
		goto out_exit_power_resources;

	smmu->sec_id = msm_dev_to_device_id(dev);
	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		goto out_power_off;
@@ -4419,6 +4571,9 @@ static int __init arm_smmu_init(void)
		return ret;

	ret = platform_driver_register(&arm_smmu_driver);
#ifdef CONFIG_MSM_TZ_SMMU
	ret = register_iommu_sec_ptbl();
#endif
	registered = !ret;
	return ret;
}
+350 −0  (drivers/iommu/io-pgtable-msm-secure.c, new file)
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	"io-pgtable-msm-secure: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <soc/qcom/scm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>

#include "io-pgtable.h"

#define IOMMU_SECURE_PTBL_SIZE  3
#define IOMMU_SECURE_PTBL_INIT  4
#define IOMMU_SECURE_MAP2_FLAT 0x12
#define IOMMU_SECURE_UNMAP2_FLAT 0x13
#define IOMMU_TLBINVAL_FLAG 0x00000001

#define io_pgtable_to_data(x)						\
	container_of((x), struct msm_secure_io_pgtable, iop)

#define io_pgtable_ops_to_pgtable(x)					\
	container_of((x), struct io_pgtable, ops)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

struct msm_secure_io_pgtable {
	struct io_pgtable iop;
};

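/*
 * One-time setup: query TZ for the size of its secure pagetable pool
 * (IOMMU_SECURE_PTBL_SIZE), allocate that much DMA memory with no
 * kernel mapping, and donate it to TZ (IOMMU_SECURE_PTBL_INIT).
 */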
int msm_iommu_sec_pgtbl_init(void)
{
	int psize[2] = {0, 0};
	unsigned int spare = 0;
	int ret, ptbl_ret = 0;
	struct device dev = {0};
	void *cpu_addr;
	dma_addr_t paddr;
	unsigned long attrs = 0;

	if (is_scm_armv8()) {
		struct scm_desc desc = {0};

		desc.args[0] = spare;
		desc.arginfo = SCM_ARGS(1);
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_PTBL_SIZE), &desc);
		psize[0] = desc.ret[0];
		psize[1] = desc.ret[1];
		if (ret || psize[1]) {
			pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
			/* don't report success when only the extended
			 * (TZ-side) return code failed */
			return ret ? ret : -EINVAL;
		}
	}

	/* Now allocate memory for the secure page tables */
	attrs = DMA_ATTR_NO_KERNEL_MAPPING;
	dev.coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	arch_setup_dma_ops(&dev, 0, 0, NULL, 1);
	cpu_addr = dma_alloc_attrs(&dev, psize[0], &paddr, GFP_KERNEL, attrs);
	if (!cpu_addr) {
		pr_err("%s: Failed to allocate %d bytes for PTBL\n",
				__func__, psize[0]);
		return -ENOMEM;
	}

	if (is_scm_armv8()) {
		struct scm_desc desc = {0};

		desc.args[0] = paddr;
		desc.args[1] = psize[0];
		desc.args[2] = 0;
		desc.arginfo = SCM_ARGS(3, SCM_RW, SCM_VAL, SCM_VAL);

		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_PTBL_INIT), &desc);
		ptbl_ret = desc.ret[0];

		if (ret) {
			pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
			return ret;
		}

		if (ptbl_ret) {
			pr_err("scm call IOMMU_SECURE_PTBL_INIT extended ret fail\n");
			/* ret is 0 here; don't return success on a
			 * TZ-side failure */
			return -EINVAL;
		}
	}

	return 0;
}
EXPORT_SYMBOL(msm_iommu_sec_pgtbl_init);

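/*
 * Map one physically contiguous region via TZ. The interface is
 * "flat": it works in 1MiB sections, hence the SZ_1M alignment checks.
 * The physical address is passed by reference, so the stack slot
 * holding it must be cleaned to RAM before the SCM call.
 */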
static int msm_secure_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	void *flush_va, *flush_va_end;
	struct scm_desc desc = {0};
	int ret = -EINVAL;
	u32 resp;

	if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(paddr, SZ_1M) ||
			!IS_ALIGNED(size, SZ_1M))
		return -EINVAL;

	desc.args[0] = virt_to_phys(&paddr);
	desc.args[1] = 1;
	desc.args[2] = size;
	desc.args[3] = cfg->arm_msm_secure_cfg.sec_id;
	desc.args[4] = cfg->arm_msm_secure_cfg.cbndx;
	desc.args[5] = iova;
	desc.args[6] = size;
	desc.args[7] = 0;

	flush_va = &paddr;
	flush_va_end = (void *)
		(((unsigned long) flush_va) + sizeof(phys_addr_t));

	/*
	 * Ensure that the buffer is in RAM by the time it gets to TZ
	 */
	dmac_clean_range(flush_va, flush_va_end);

	desc.arginfo = SCM_ARGS(8, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
				SCM_VAL, SCM_VAL, SCM_VAL);

	if (is_scm_armv8()) {
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_MAP2_FLAT), &desc);
		resp = desc.ret[0];
	}

	if (ret || resp)
		return -EINVAL;

	return 0;
}

static dma_addr_t msm_secure_get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	dma_addr_t pa = sg_dma_address(sg);

	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

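/*
 * Map a scatterlist via TZ. A single contiguous run is passed directly;
 * otherwise the list is flattened into pa_list, an array of 1MiB-sized
 * physical chunk addresses that TZ walks.
 */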
static int msm_secure_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents,
			   int iommu_prot, size_t *size)
{
	struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = -EINVAL;
	struct scatterlist *tmp, *sgiter;
	dma_addr_t *pa_list = NULL;
	unsigned int cnt, offset = 0, chunk_offset = 0;
	dma_addr_t pa;
	void *flush_va, *flush_va_end;
	unsigned long len = 0;
	struct scm_desc desc = {0};
	int i;
	u32 resp;

	for_each_sg(sg, tmp, nents, i)
		len += tmp->length;

	if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
		return -EINVAL;

	if (sg->length == len) {
		cnt = 1;
		pa = msm_secure_get_phys_addr(sg);
		if (!IS_ALIGNED(pa, SZ_1M))
			return -EINVAL;

		desc.args[0] = virt_to_phys(&pa);
		desc.args[1] = cnt;
		desc.args[2] = len;
		flush_va = &pa;
	} else {
		sgiter = sg;
		if (!IS_ALIGNED(sgiter->length, SZ_1M))
			return -EINVAL;
		cnt = sg->length / SZ_1M;
		while ((sgiter = sg_next(sgiter))) {
			if (!IS_ALIGNED(sgiter->length, SZ_1M))
				return -EINVAL;
			cnt += sgiter->length / SZ_1M;
		}

		pa_list = kmalloc_array(cnt, sizeof(*pa_list), GFP_KERNEL);
		if (!pa_list)
			return -ENOMEM;

		sgiter = sg;
		cnt = 0;
		pa = msm_secure_get_phys_addr(sgiter);
		while (offset < len) {

			if (!IS_ALIGNED(pa, SZ_1M)) {
				kfree(pa_list);
				return -EINVAL;
			}

			pa_list[cnt] = pa + chunk_offset;
			chunk_offset += SZ_1M;
			offset += SZ_1M;
			cnt++;

			if (chunk_offset >= sgiter->length && offset < len) {
				chunk_offset = 0;
				sgiter = sg_next(sgiter);
				pa = msm_secure_get_phys_addr(sgiter);
			}
		}

		desc.args[0] = virt_to_phys(pa_list);
		desc.args[1] = cnt;
		desc.args[2] = SZ_1M;
		flush_va = pa_list;
	}

	desc.args[3] = cfg->arm_msm_secure_cfg.sec_id;
	desc.args[4] = cfg->arm_msm_secure_cfg.cbndx;
	desc.args[5] = iova;
	desc.args[6] = len;
	desc.args[7] = 0;

	desc.arginfo = SCM_ARGS(8, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
			SCM_VAL, SCM_VAL, SCM_VAL);

	/*
	 * Ensure that the buffer is in RAM by the time it gets to TZ
	 */

	flush_va_end = (void *) (((unsigned long) flush_va) +
			(cnt * sizeof(*pa_list)));
	dmac_clean_range(flush_va, flush_va_end);

	if (is_scm_armv8()) {
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_MP,
					 IOMMU_SECURE_MAP2_FLAT), &desc);
		resp = desc.ret[0];

		if (ret || resp)
			ret = -EINVAL;
		else
			ret = len;
	}

	kfree(pa_list);
	return ret;
}

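/*
 * Unmap a range via TZ; IOMMU_TLBINVAL_FLAG asks TZ to also invalidate
 * the TLB, since HLOS cannot touch the secure pagetables itself.
 */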
static size_t msm_secure_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t len)
{
	struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = -EINVAL;
	struct scm_desc desc = {0};

	if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
		return ret;

	desc.args[0] = cfg->arm_msm_secure_cfg.sec_id;
	desc.args[1] = cfg->arm_msm_secure_cfg.cbndx;
	desc.args[2] = iova;
	desc.args[3] = len;
	desc.args[4] = IOMMU_TLBINVAL_FLAG;
	desc.arginfo = SCM_ARGS(5);

	if (is_scm_armv8()) {
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_MP,
			IOMMU_SECURE_UNMAP2_FLAT), &desc);

		if (!ret)
			ret = len;
	}
	return ret;
}

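/*
 * TZ owns the pagetables, so HLOS cannot walk them; there is no
 * meaningful iova-to-phys translation for this format.
 */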
static phys_addr_t msm_secure_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	return -EINVAL;
}

static struct msm_secure_io_pgtable *
msm_secure_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
{
	struct msm_secure_io_pgtable *data;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= msm_secure_map,
		.map_sg		= msm_secure_map_sg,
		.unmap		= msm_secure_unmap,
		.iova_to_phys	= msm_secure_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
msm_secure_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct msm_secure_io_pgtable *data =
		msm_secure_alloc_pgtable_data(cfg);

	/* guard against allocation failure before taking &data->iop */
	if (!data)
		return NULL;

	return &data->iop;
}

static void msm_secure_free_pgtable(struct io_pgtable *iop)
{
	struct msm_secure_io_pgtable *data = io_pgtable_to_data(iop);

	kfree(data);
}

struct io_pgtable_init_fns io_pgtable_arm_msm_secure_init_fns = {
	.alloc	= msm_secure_alloc_pgtable,
	.free	= msm_secure_free_pgtable,
};
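For context, a hedged sketch of how a domain lands on this backend, mirroring the arm-smmu.c hunk above (alloc_io_pgtable_ops() is the generic io-pgtable entry point; the helper name and the SZ_1M pgsize choice are illustrative):

#include <linux/sizes.h>
#include <soc/qcom/msm_tz_smmu.h>
#include "io-pgtable.h"

/* Illustrative: select the MSM secure format for a TZ-owned context
 * bank; sec_id and cbndx come from the SMMU driver. */
static struct io_pgtable_ops *
example_alloc_secure_ops(enum tz_smmu_device_id sec_id, u32 cbndx,
			 void *cookie)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap = SZ_1M,
		.arm_msm_secure_cfg = {
			.sec_id = sec_id,
			.cbndx	= cbndx,
		},
	};

	return alloc_io_pgtable_ops(ARM_MSM_SECURE, &cfg, cookie);
}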