Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e45c307b authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "iommu/arm-smmu: msm map/unmap calls for slave side secure targets"

parents ccb874a7 42116ced
Loading
Loading
Loading
Loading
+120 −4
Original line number Diff line number Diff line
@@ -414,6 +414,16 @@ static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain);
static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);

static int msm_secure_smmu_map(struct iommu_domain *domain, unsigned long iova,
			       phys_addr_t paddr, size_t size, int prot);
static size_t msm_secure_smmu_unmap(struct iommu_domain *domain,
				    unsigned long iova,
				    size_t size);
static size_t msm_secure_smmu_map_sg(struct iommu_domain *domain,
				     unsigned long iova,
				     struct scatterlist *sg,
				     unsigned int nents, int prot);

/* Convert a generic iommu_domain to its containing arm_smmu_domain. */
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	/* The iommu_domain is embedded as the 'domain' member. */
	return container_of(dom, struct arm_smmu_domain, domain);
}
@@ -1201,6 +1211,28 @@ static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
	.free_pages_exact = arm_smmu_free_pages_exact,
};

/*
 * No-op TLB-flush-all callback for slave-side secure domains.
 * Page tables for these domains are managed via SCM calls into TZ
 * (see msm_secure_smmu_map/unmap), so the non-secure kernel performs
 * no local TLB maintenance here — presumably TZ handles invalidation
 * as part of the secure map/unmap calls (NOTE(review): confirm).
 */
static void msm_smmu_tlb_inv_context(void *cookie)
{
}

/*
 * No-op ranged TLB invalidate for slave-side secure domains; TLB
 * maintenance is not done by the non-secure kernel for these domains.
 */
static void msm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf,
					  void *cookie)
{
}

/* No-op TLB sync for slave-side secure domains (see above). */
static void msm_smmu_tlb_sync(void *cookie)
{
}

static struct iommu_gather_ops msm_smmu_gather_ops = {
	.tlb_flush_all	= msm_smmu_tlb_inv_context,
	.tlb_add_flush	= msm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= msm_smmu_tlb_sync,
	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
	.free_pages_exact = arm_smmu_free_pages_exact,
};

static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
@@ -1394,6 +1426,19 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
	return IRQ_HANDLED;
}

static bool arm_smmu_master_attached(struct arm_smmu_device *smmu,
				     struct iommu_fwspec *fwspec)
{
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (smmu->s2crs[idx].attach_count)
			return true;
	}

	return false;
}

static int arm_smmu_set_pt_format(struct arm_smmu_domain *smmu_domain,
				  struct io_pgtable_cfg *pgtbl_cfg)
{
@@ -1747,6 +1792,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
		(smmu->model == QCOM_SMMUV500))
		quirks |= IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE;

	if (arm_smmu_is_slave_side_secure(smmu_domain))
		tlb_ops = &msm_smmu_gather_ops;

	ret = arm_smmu_alloc_cb(domain, smmu, dev);
	if (ret < 0)
		goto out_unlock;
@@ -1767,6 +1815,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
				.sec_id = smmu->sec_id,
				.cbndx = cfg->cbndx,
			},
			.tlb		= tlb_ops,
			.iommu_dev      = smmu->dev,
		};
		fmt = ARM_MSM_SECURE;
@@ -1847,6 +1896,10 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	if (arm_smmu_is_slave_side_secure(smmu_domain) &&
			!arm_smmu_master_attached(smmu, dev->iommu_fwspec))
		arm_smmu_restore_sec_cfg(smmu, cfg->cbndx);

	return 0;

out_clear_smmu:
@@ -2196,8 +2249,6 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
	const struct iommu_gather_ops *tlb;

	tlb = smmu_domain->pgtbl_cfg.tlb;
	if (!tlb)
		return;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
@@ -2452,6 +2503,9 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
	if (!ops)
		return -ENODEV;

	if (arm_smmu_is_slave_side_secure(smmu_domain))
		return msm_secure_smmu_map(domain, iova, paddr, size, prot);

	arm_smmu_secure_domain_lock(smmu_domain);

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
@@ -2491,6 +2545,9 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
	if (!ops)
		return 0;

	if (arm_smmu_is_slave_side_secure(smmu_domain))
		return msm_secure_smmu_unmap(domain, iova, size);

	ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
	if (ret)
		return ret;
@@ -2528,6 +2585,9 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
	if (!ops)
		return -ENODEV;

	if (arm_smmu_is_slave_side_secure(smmu_domain))
		return msm_secure_smmu_map_sg(domain, iova, sg, nents, prot);

	ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
	if (ret)
		return ret;
@@ -2703,6 +2763,58 @@ struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
	return dev ? dev_get_drvdata(dev) : NULL;
}

#ifdef CONFIG_MSM_TZ_SMMU
/*
 * Map a single physically-contiguous range on a slave-side secure
 * domain by delegating to the secure io-pgtable ops (SCM call into TZ).
 *
 * Returns 0 on success or a negative errno from ops->map().
 *
 * Fix: 'ret' was declared size_t, but ops->map() and this function both
 * use int; storing a negative errno in a size_t and returning it as int
 * round-trips the value through an unsigned type. Use int throughout.
 */
static int msm_secure_smmu_map(struct iommu_domain *domain, unsigned long iova,
			       phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	ret = ops->map(ops, iova, paddr, size, prot);

	return ret;
}

/*
 * Unmap a range on a slave-side secure domain via the secure
 * io-pgtable ops, with the domain powered on around the SCM call.
 *
 * Returns the number of bytes unmapped, or 0 on failure (the IOMMU
 * unmap convention).
 *
 * Fix: the original stored the int result of arm_smmu_domain_power_on()
 * in a size_t and returned it, so a negative errno (e.g. -EINVAL)
 * became a huge positive size_t that callers would treat as a large
 * successful unmap. Return 0 on power-on failure instead.
 */
static size_t msm_secure_smmu_unmap(struct iommu_domain *domain,
				    unsigned long iova,
				    size_t size)
{
	size_t ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (arm_smmu_domain_power_on(domain, smmu_domain->smmu))
		return 0;

	ret = ops->unmap(ops, iova, size);

	arm_smmu_domain_power_off(domain, smmu_domain->smmu);

	return ret;
}

/*
 * Map a scatterlist on a slave-side secure domain via the secure
 * io-pgtable ops. On complete failure (ret == 0), any partially
 * mapped region (reported through 'size') is torn down.
 *
 * Returns the number of bytes mapped, or 0 on failure.
 */
static size_t msm_secure_smmu_map_sg(struct iommu_domain *domain,
				     unsigned long iova,
				     struct scatterlist *sg,
				     unsigned int nents, int prot)
{
	int ret;
	size_t size;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	/* map_sg reports the bytes mapped so far through &size. */
	ret = ops->map_sg(ops, iova, sg, nents, prot, &size);

	/* Total failure: unmap whatever was partially established. */
	if (!ret)
		msm_secure_smmu_unmap(domain, iova, size);

	/*
	 * NOTE(review): 'ret' is int but the return type is size_t; if
	 * ops->map_sg() can return a negative errno (the underlying
	 * msm_secure_map_sg path is not fully visible here), it would
	 * convert to a huge positive size_t. Confirm map_sg only
	 * returns 0 or a byte count on this path.
	 */
	return ret;
}

#endif

static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
@@ -4374,7 +4486,11 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
	if (res == NULL) {
		dev_err(dev, "no MEM resource info\n");
		return -EINVAL;
	}

	smmu->phys_addr = res->start;
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
+15 −5
Original line number Diff line number Diff line
@@ -41,6 +41,8 @@

/* Secure io-pgtable instance whose tables are managed by TZ. */
struct msm_secure_io_pgtable {
	struct io_pgtable iop;
	/*
	 * Serializes page-table operations: held around the
	 * cache-maintenance + scm_call2() sequence in the secure
	 * map/map_sg/unmap paths so concurrent SCM calls cannot
	 * interleave on the same page table.
	 */
	struct mutex pgtbl_lock;
};

int msm_iommu_sec_pgtbl_init(void)
@@ -133,6 +135,7 @@ static int msm_secure_map(struct io_pgtable_ops *ops, unsigned long iova,
	flush_va_end = (void *)
		(((unsigned long) flush_va) + sizeof(phys_addr_t));

	mutex_lock(&data->pgtbl_lock);
	/*
	 * Ensure that the buffer is in RAM by the time it gets to TZ
	 */
@@ -142,10 +145,11 @@ static int msm_secure_map(struct io_pgtable_ops *ops, unsigned long iova,
				SCM_VAL, SCM_VAL, SCM_VAL);

	if (is_scm_armv8()) {
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_MP,
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_MAP2_FLAT), &desc);
		resp = desc.ret[0];
	}
	mutex_unlock(&data->pgtbl_lock);

	if (ret || resp)
		return -EINVAL;
@@ -258,10 +262,12 @@ static int msm_secure_map_sg(struct io_pgtable_ops *ops, unsigned long iova,

	flush_va_end = (void *) (((unsigned long) flush_va) +
			(cnt * sizeof(*pa_list)));

	mutex_lock(&data->pgtbl_lock);
	dmac_clean_range(flush_va, flush_va_end);

	if (is_scm_armv8()) {
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_MP,
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_MAP2_FLAT), &desc);
		resp = desc.ret[0];

@@ -270,6 +276,7 @@ static int msm_secure_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
		else
			ret = len;
	}
	mutex_unlock(&data->pgtbl_lock);

	kfree(pa_list);
	return ret;
@@ -293,13 +300,15 @@ static size_t msm_secure_unmap(struct io_pgtable_ops *ops, unsigned long iova,
	desc.args[4] = IOMMU_TLBINVAL_FLAG;
	desc.arginfo = SCM_ARGS(5);

	mutex_lock(&data->pgtbl_lock);
	if (is_scm_armv8()) {
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_MP,
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_UNMAP2_FLAT), &desc);

		if (!ret)
			ret = len;
	}
	mutex_unlock(&data->pgtbl_lock);
	return ret;
}

@@ -324,6 +333,7 @@ msm_secure_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
		.unmap		= msm_secure_unmap,
		.iova_to_phys	= msm_secure_iova_to_phys,
	};
	mutex_init(&data->pgtbl_lock);

	return data;
}
+23 −0
Original line number Diff line number Diff line
@@ -88,6 +88,29 @@ static inline int register_iommu_sec_ptbl(void)
{
	return -EINVAL;
}

/*
 * Stub for !CONFIG_MSM_TZ_SMMU builds.
 *
 * Fix: returning -EINVAL from a size_t-returning function implicitly
 * converts to a huge positive value, which callers would read as a
 * large successful unmap. The IOMMU unmap convention is to return 0
 * (nothing unmapped) on failure.
 */
static inline size_t msm_secure_smmu_unmap(struct iommu_domain *domain,
					   unsigned long iova,
					   size_t size)
{
	return 0;
}

/*
 * Stub for !CONFIG_MSM_TZ_SMMU builds.
 *
 * Fix: -EINVAL implicitly converted to size_t would look like a huge
 * successfully-mapped byte count; return 0 (nothing mapped) instead,
 * matching the map_sg failure convention.
 */
static inline size_t msm_secure_smmu_map_sg(struct iommu_domain *domain,
					    unsigned long iova,
					    struct scatterlist *sg,
					    unsigned int nents, int prot)
{
	return 0;
}

/*
 * Stub for !CONFIG_MSM_TZ_SMMU builds: secure mapping is unavailable,
 * so fail with -EINVAL (int return, so the errno is preserved).
 */
static inline int msm_secure_smmu_map(struct iommu_domain *domain,
				      unsigned long iova,
				      phys_addr_t paddr, size_t size, int prot)
{
	return -EINVAL;
}

#endif /* CONFIG_MSM_TZ_SMMU */

#endif /* __MSM_TZ_SMMU_H__ */