Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3d6486e9 authored by Patrick Daly, committed by Gerrit - the friendly Code Review server
Browse files

iommu: arm-smmu: Add support for ACTLR settings for sdm855



Some MMU500 implementations have additional settings in the ACTLR
register. Retrieve the settings from devicetree and validate that
all devices in the same iommu_group share the same ACTLR settings.

Change-Id: Idbe32ac3b1b6774cfbc9cc5218eb9605a67d0637
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
[sudaraja@codeaurora.org: fixed merge conflicts]
Signed-off-by: Sudarshan Rajagopalan <sudaraja@codeaurora.org>
parent 87ef03a5
Loading
Loading
Loading
Loading
+5 −0
Original line number Original line Diff line number Diff line
@@ -96,6 +96,11 @@ conditions.
		  retention. No cache invalidation operations involving asid
		  retention. No cache invalidation operations involving asid
		  may be used.
		  may be used.


- qcom,actlr:
		  An array of <sid mask actlr-setting>.
		  Any sid X for which X&~mask==sid will be programmed with the
		  given actlr-setting.

- qcom,deferred-regulator-disable-delay : The time delay for deferred regulator
- qcom,deferred-regulator-disable-delay : The time delay for deferred regulator
                  disable in ms. In case of unmap call, regulator is
                  disable in ms. In case of unmap call, regulator is
                  enabled/disabled. This may introduce additional delay. For
                  enabled/disabled. This may introduce additional delay. For
+241 −34
Original line number Original line Diff line number Diff line
@@ -130,14 +130,6 @@ enum arm_smmu_implementation {
	QCOM_SMMUV500,
	QCOM_SMMUV500,
};
};


struct arm_smmu_device;
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
};

struct arm_smmu_impl_def_reg {
struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 offset;
	u32 value;
	u32 value;
@@ -226,6 +218,7 @@ struct arm_smmu_power_resources {
	int				regulator_defer;
	int				regulator_defer;
};
};


struct arm_smmu_arch_ops;
struct arm_smmu_device {
struct arm_smmu_device {
	struct device			*dev;
	struct device			*dev;


@@ -399,9 +392,6 @@ static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);


static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);

static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
				    dma_addr_t iova);
				    dma_addr_t iova);


@@ -465,6 +455,76 @@ static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
		mutex_unlock(&smmu_domain->assign_lock);
		mutex_unlock(&smmu_domain->assign_lock);
}
}


/*
 * init()
 * Hook for additional device tree parsing at probe time.
 *
 * device_reset()
 * Hook for one-time architecture-specific register settings.
 *
 * iova_to_phys_hard()
 * Provides debug information. May be called from the context fault irq handler.
 *
 * init_context_bank()
 * Hook for architecture-specific settings which require knowledge of the
 * dynamically allocated context bank number.
 *
 * device_group()
 * Hook for checking whether a device is compatible with a said group.
 */
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);	/* probe-time DT parsing */
	void (*device_reset)(struct arm_smmu_device *smmu);	/* one-time register setup */
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);	/* debug; may run in fault irq */
	void (*init_context_bank)(struct arm_smmu_domain *smmu_domain,
					struct device *dev);	/* needs allocated cbndx */
	int (*device_group)(struct device *dev, struct iommu_group *group);	/* group compat check */
};

static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->init)
		return 0;
	return smmu->arch_ops->init(smmu);
}

/*
 * Run the implementation-specific device_reset() hook, if registered.
 * Fix: the original used "return <void expression>;", which is invalid
 * in strict ISO C (C11 6.8.6.4p1) and flagged by checkpatch; call the
 * hook as a plain statement instead.
 */
static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops || !smmu->arch_ops->device_reset)
		return;

	smmu->arch_ops->device_reset(smmu);
}

/*
 * Run the implementation-specific init_context_bank() hook, if registered.
 * Called once the context bank number (cbndx) has been allocated.
 * Fix: the original used "return <void expression>;", which is invalid
 * in strict ISO C (C11 6.8.6.4p1); call the hook as a plain statement.
 */
static void arm_smmu_arch_init_context_bank(
		struct arm_smmu_domain *smmu_domain, struct device *dev)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (!smmu->arch_ops || !smmu->arch_ops->init_context_bank)
		return;

	smmu->arch_ops->init_context_bank(smmu_domain, dev);
}

static int arm_smmu_arch_device_group(struct device *dev,
					struct iommu_group *group)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);

	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->device_group)
		return 0;
	return smmu->arch_ops->device_group(dev, group);
}

static struct device_node *dev_get_dev_node(struct device *dev)
static struct device_node *dev_get_dev_node(struct device *dev)
{
{
	if (dev_is_pci(dev)) {
	if (dev_is_pci(dev)) {
@@ -1671,6 +1731,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
		arm_smmu_write_context_bank(smmu, cfg->cbndx,
		arm_smmu_write_context_bank(smmu, cfg->cbndx,
						smmu_domain->attributes );
						smmu_domain->attributes );


		arm_smmu_arch_init_context_bank(smmu_domain, dev);

		/*
		/*
		 * Request context fault interrupt. Do this last to avoid the
		 * Request context fault interrupt. Do this last to avoid the
		 * handler seeing a half-initialised domain state.
		 * handler seeing a half-initialised domain state.
@@ -2626,13 +2688,22 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
	}
	}


	if (group)
	if (group)
		return iommu_group_ref_get(group);
		iommu_group_ref_get(group);

	else {
		if (dev_is_pci(dev))
		if (dev_is_pci(dev))
			group = pci_device_group(dev);
			group = pci_device_group(dev);
		else
		else
			group = generic_device_group(dev);
			group = generic_device_group(dev);


		if (IS_ERR(group))
			return NULL;
	}

	if (arm_smmu_arch_device_group(dev, group)) {
		iommu_group_put(group);
		return ERR_PTR(-EINVAL);
	}

	return group;
	return group;
}
}


@@ -3943,24 +4014,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
	return 0;
	return 0;
}
}


static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->init)
		return 0;
	return smmu->arch_ops->init(smmu);
}

static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->device_reset)
		return;
	return smmu->arch_ops->device_reset(smmu);
}

struct arm_smmu_match_data {
struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
	enum arm_smmu_implementation model;
@@ -4379,10 +4432,18 @@ IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);


#define TBU_DBG_TIMEOUT_US		30000
#define TBU_DBG_TIMEOUT_US		30000



/*
 * One devicetree-supplied ACTLR override: any stream ID matching the
 * smr id/mask pair gets actlr programmed into its context bank.
 */
struct actlr_setting {
	struct arm_smmu_smr smr;
	u32 actlr;
};

struct qsmmuv500_archdata {
struct qsmmuv500_archdata {
	struct list_head		tbus;
	struct list_head		tbus;
	void __iomem			*tcu_base;
	void __iomem			*tcu_base;
	u32				version;
	u32				version;
	struct actlr_setting		*actlrs;
	u32				actlr_tbl_size;
};
};
#define get_qsmmuv500_archdata(smmu)				\
#define get_qsmmuv500_archdata(smmu)				\
	((struct qsmmuv500_archdata *)(smmu->archdata))
	((struct qsmmuv500_archdata *)(smmu->archdata))
@@ -4403,6 +4464,32 @@ struct qsmmuv500_tbu_device {
	u32				halt_count;
	u32				halt_count;
};
};


/*
 * Per-iommu_group private data: the ACTLR value shared by all devices
 * in the group. has_actlr distinguishes "no override" from actlr == 0.
 */
struct qsmmuv500_group_iommudata {
	bool has_actlr;
	u32 actlr;
};
/* May return NULL if no iommudata has been attached to the group yet. */
#define to_qsmmuv500_group_iommudata(group)				\
	((struct qsmmuv500_group_iommudata *)				\
		(iommu_group_get_iommudata(group)))


/*
 * Return true if any of the fwspec's stream-mapping entries matches the
 * given SMR (id/mask pair). Two entries match when their IDs agree on
 * every bit that neither mask ignores.
 */
static bool arm_smmu_fwspec_match_smr(struct iommu_fwspec *fwspec,
				      struct arm_smmu_smr *smr)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_smr *cur;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		cur = &smmu->smrs[idx];
		if (!((smr->id ^ cur->id) & ~(smr->mask | cur->mask)))
			return true;
	}

	return false;
}

static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
{
{
	unsigned long flags;
	unsigned long flags;
@@ -4656,6 +4743,74 @@ static phys_addr_t qsmmuv500_iova_to_phys_hard(
	return qsmmuv500_iova_to_phys(domain, iova, sid);
	return qsmmuv500_iova_to_phys(domain, iova, sid);
}
}


/* iommu_group iommudata destructor: frees the per-group ACTLR state. */
static void qsmmuv500_release_group_iommudata(void *iommudata)
{
	kfree(iommudata);
}

/* If a device has a valid actlr, it must match */
static int qsmmuv500_device_group(struct device *dev,
				struct iommu_group *group)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
	struct qsmmuv500_group_iommudata *iommudata;
	u32 actlr, i;
	struct arm_smmu_smr *smr;

	iommudata = to_qsmmuv500_group_iommudata(group);
	if (!iommudata) {
		iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
		if (!iommudata)
			return -ENOMEM;

		iommu_group_set_iommudata(group, iommudata,
				qsmmuv500_release_group_iommudata);
	}

	for (i = 0; i < data->actlr_tbl_size; i++) {
		smr = &data->actlrs[i].smr;
		actlr = data->actlrs[i].actlr;

		if (!arm_smmu_fwspec_match_smr(fwspec, smr))
			continue;

		if (!iommudata->has_actlr) {
			iommudata->actlr = actlr;
			iommudata->has_actlr = true;
		} else if (iommudata->actlr != actlr) {
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * init_context_bank hook: program the group's ACTLR override into the
 * domain's context bank, then flush the TLB so no entries retain state
 * derived from the previous ACTLR value.
 */
static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,
				struct device *dev)
{
	struct qsmmuv500_group_iommudata *iommudata =
		to_qsmmuv500_group_iommudata(dev->iommu_group);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
	void __iomem *cb_base;

	if (!iommudata->has_actlr)
		return;

	cb_base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
	writel_relaxed(iommudata->actlr, cb_base + ARM_SMMU_CB_ACTLR);

	/* Invalidate everything cached under the old ACTLR configuration */
	tlb->tlb_flush_all(smmu_domain);
}

static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
{
{
	struct arm_smmu_device *smmu = cookie;
	struct arm_smmu_device *smmu = cookie;
@@ -4675,6 +4830,38 @@ static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
	return 0;
	return 0;
}
}


/*
 * Parse the optional "qcom,actlr" devicetree property, an array of
 * <sid mask actlr> triples, into data->actlrs / data->actlr_tbl_size.
 * An absent or malformed property is treated as "no overrides" (0);
 * returns -ENOMEM on allocation failure.
 *
 * Fix: use devm_kcalloc() instead of devm_kzalloc(sizeof(*actlrs) * len)
 * so the element-count multiplication is overflow-checked (kernel idiom;
 * checkpatch warns on open-coded multiplications in allocator calls).
 */
static int qsmmuv500_read_actlr_tbl(struct arm_smmu_device *smmu)
{
	int len, i;
	struct device *dev = smmu->dev;
	struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
	struct actlr_setting *actlrs;
	const __be32 *cell;

	cell = of_get_property(dev->of_node, "qcom,actlr", NULL);
	if (!cell)
		return 0;

	/* Each table entry is three u32 cells: <sid mask actlr-setting> */
	len = of_property_count_elems_of_size(dev->of_node, "qcom,actlr",
						sizeof(u32) * 3);
	if (len < 0)
		return 0;

	actlrs = devm_kcalloc(dev, len, sizeof(*actlrs), GFP_KERNEL);
	if (!actlrs)
		return -ENOMEM;

	for (i = 0; i < len; i++) {
		actlrs[i].smr.id = of_read_number(cell++, 1);
		actlrs[i].smr.mask = of_read_number(cell++, 1);
		actlrs[i].actlr = of_read_number(cell++, 1);
	}

	data->actlrs = actlrs;
	data->actlr_tbl_size = len;
	return 0;
}

static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
{
{
	struct resource *res;
	struct resource *res;
@@ -4682,6 +4869,8 @@ static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
	struct qsmmuv500_archdata *data;
	struct qsmmuv500_archdata *data;
	struct platform_device *pdev;
	struct platform_device *pdev;
	int ret;
	int ret;
	u32 val;
	void __iomem *reg;


	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
	if (!data)
@@ -4698,6 +4887,22 @@ static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
	data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
	data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
	smmu->archdata = data;
	smmu->archdata = data;


	ret = qsmmuv500_read_actlr_tbl(smmu);
	if (ret)
		return ret;

	reg = ARM_SMMU_GR0(smmu);
	val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
	val &= ~ARM_MMU500_ACR_CACHE_LOCK;
	writel_relaxed(val, reg + ARM_SMMU_GR0_sACR);
	val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
	/*
	 * Modifiying the nonsecure copy of the sACR register is only
	 * allowed if permission is given in the secure sACR register.
	 * Attempt to detect if we were able to update the value.
	 */
	WARN_ON(val & ARM_MMU500_ACR_CACHE_LOCK);

	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
	if (ret)
	if (ret)
		return ret;
		return ret;
@@ -4713,6 +4918,8 @@ static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
	.init = qsmmuv500_arch_init,
	.init = qsmmuv500_arch_init,
	.iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
	.iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
	.init_context_bank = qsmmuv500_init_cb,
	.device_group = qsmmuv500_device_group,
};
};


static const struct of_device_id qsmmuv500_tbu_of_match[] = {
static const struct of_device_id qsmmuv500_tbu_of_match[] = {