Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8b111f13 authored by Liam Mark, committed by Prakash Gupta
Browse files

iommu/arm-smmu: add support to configure IOVA range



Allow fastmap clients to specify the IOVA range
via the DOMAIN_ATTR_GEOMETRY domain attribute.

Presently fastmap only allocates page tables for the IOVA
range specified during the create mapping call. However
clients may want to use IOVA addresses outside this range,
such as for their calls to iommu_map.

So allow clients to extend the available IOVA space by setting
the DOMAIN_ATTR_GEOMETRY domain attribute's
iommu_domain_geometry.aperture_start to
the new start address of the IOVA space and by setting
iommu_domain_geometry.aperture_end to the new end address of the
IOVA space.
The new IOVA space created by iommu_domain_geometry.aperture_start
and iommu_domain_geometry.aperture_end will be a superset of the
IOVA range which was created through the create mapping call.

The DOMAIN_ATTR_GEOMETRY domain attribute can only be set before
attaching.
Calls to set the DOMAIN_ATTR_GEOMETRY domain attribute can only
be used to extend the IOVA space; they cannot shrink the range.

Note that extending the IOVA range will not change the range of
IOVA addresses which will be available to the DMA APIs.

CRs-Fixed: 2035925
Change-Id: Ib389e019a022d98417884002de08115fb0fc9384
Signed-off-by: Liam Mark <lmark@codeaurora.org>
[guptap@codeaurora.org: update geometry.aperture with mapping]
Signed-off-by: Prakash Gupta <guptap@codeaurora.org>
parent 86f8167c
Loading
Loading
Loading
Loading
+47 −6
Original line number Original line Diff line number Diff line
@@ -2230,6 +2230,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,


	cfg->cbndx = ret;
	cfg->cbndx = ret;


	if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_GEOMETRY))) {
		/* Geometry is not set use the default geometry */
		domain->geometry.aperture_start = 0;
		domain->geometry.aperture_end = (1UL << ias) - 1;
		if (domain->geometry.aperture_end >= SZ_1G * 4ULL)
			domain->geometry.aperture_end = (SZ_1G * 4ULL) - 1;
	}

	if (arm_smmu_is_slave_side_secure(smmu_domain)) {
	if (arm_smmu_is_slave_side_secure(smmu_domain)) {
		smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
		smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
			.quirks         = quirks,
			.quirks         = quirks,
@@ -2278,12 +2286,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
	domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
	domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;
	domain->geometry.force_aperture = true;
	if (smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST)) {
		domain->geometry.aperture_start =
			smmu_domain->pgtbl_cfg.iova_base;
		domain->geometry.aperture_end =
			smmu_domain->pgtbl_cfg.iova_end;
	}


	/* Assign an asid */
	/* Assign an asid */
	ret = arm_smmu_init_asid(domain, smmu);
	ret = arm_smmu_init_asid(domain, smmu);
@@ -4016,6 +4018,45 @@ static int __arm_smmu_domain_set_attr2(struct iommu_domain *domain,
		ret = 0;
		ret = 0;
		break;
		break;
	}
	}

	case DOMAIN_ATTR_GEOMETRY: {
		struct iommu_domain_geometry *geometry =
				(struct iommu_domain_geometry *)data;

		if (smmu_domain->smmu != NULL) {
			dev_err(smmu_domain->smmu->dev,
			  "cannot set geometry attribute while attached\n");
			ret = -EBUSY;
			break;
		}

		if (geometry->aperture_start >= SZ_1G * 4ULL ||
		    geometry->aperture_end >= SZ_1G * 4ULL) {
			pr_err("fastmap does not support IOVAs >= 4GB\n");
			ret = -EINVAL;
			break;
		}
		if (smmu_domain->attributes
			  & (1 << DOMAIN_ATTR_GEOMETRY)) {
			if (geometry->aperture_start
					< domain->geometry.aperture_start)
				domain->geometry.aperture_start =
					geometry->aperture_start;

			if (geometry->aperture_end
					> domain->geometry.aperture_end)
				domain->geometry.aperture_end =
					geometry->aperture_end;
		} else {
			smmu_domain->attributes |= 1 << DOMAIN_ATTR_GEOMETRY;
			domain->geometry.aperture_start =
						geometry->aperture_start;
			domain->geometry.aperture_end = geometry->aperture_end;
		}
		ret = 0;
		break;
	}

	default:
	default:
		ret = -ENODEV;
		ret = -ENODEV;
	}
	}
+4 −2
Original line number Original line Diff line number Diff line
@@ -210,7 +210,9 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,


		iommu_tlbiall(mapping->domain);
		iommu_tlbiall(mapping->domain);
		mapping->have_stale_tlbs = false;
		mapping->have_stale_tlbs = false;
		av8l_fast_clear_stale_ptes(mapping->pgtbl_ops, mapping->base,
		av8l_fast_clear_stale_ptes(mapping->pgtbl_ops,
				mapping->domain->geometry.aperture_start,
				mapping->base,
				mapping->base + mapping->size - 1,
				mapping->base + mapping->size - 1,
				skip_sync);
				skip_sync);
	}
	}
@@ -1077,7 +1079,7 @@ static const struct dma_map_ops fast_smmu_dma_ops = {
 *
 *
 * Creates a mapping structure which holds information about used/unused IO
 * Creates a mapping structure which holds information about used/unused IO
 * address ranges, which is required to perform mapping with IOMMU aware
 * address ranges, which is required to perform mapping with IOMMU aware
 * functions.  The only VA range supported is [0, 4GB).
 * functions. The only VA range supported is [0, 4GB].
 *
 *
 * The client device need to be attached to the mapping with
 * The client device need to be attached to the mapping with
 * fast_smmu_attach_device function.
 * fast_smmu_attach_device function.
+7 −6
Original line number Original line Diff line number Diff line
@@ -38,6 +38,7 @@ struct av8l_fast_io_pgtable {
	struct page		**pages; /* page table memory */
	struct page		**pages; /* page table memory */
	int			nr_pages;
	int			nr_pages;
	dma_addr_t		base;
	dma_addr_t		base;
	dma_addr_t		start;
	dma_addr_t		end;
	dma_addr_t		end;
};
};


@@ -177,13 +178,13 @@ static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep)
}
}


void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, u64 base,
void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, u64 base,
		u64 end, bool skip_sync)
		u64 start, u64 end, bool skip_sync)
{
{
	int i;
	int i;
	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
	av8l_fast_iopte *pmdp = data->pmds;
	av8l_fast_iopte *pmdp = iopte_pmd_offset(pmds, base, start);


	for (i = base >> AV8L_FAST_PAGE_SHIFT;
	for (i = start >> AV8L_FAST_PAGE_SHIFT;
			i <= (end >> AV8L_FAST_PAGE_SHIFT); ++i) {
			i <= (end >> AV8L_FAST_PAGE_SHIFT); ++i) {
		if (!(*pmdp & AV8L_FAST_PTE_VALID)) {
		if (!(*pmdp & AV8L_FAST_PTE_VALID)) {
			*pmdp = 0;
			*pmdp = 0;
@@ -738,7 +739,7 @@ static int __init av8l_fast_positive_testing(void)
	}
	}


	/* sweep up TLB proving PTEs */
	/* sweep up TLB proving PTEs */
	av8l_fast_clear_stale_ptes(ops, base, max, false);
	av8l_fast_clear_stale_ptes(pmds, base, base, max, false);


	/* map the entire 4GB VA space with 8K map calls */
	/* map the entire 4GB VA space with 8K map calls */
	for (iova = base; iova < max; iova += SZ_8K) {
	for (iova = base; iova < max; iova += SZ_8K) {
@@ -759,7 +760,7 @@ static int __init av8l_fast_positive_testing(void)
	}
	}


	/* sweep up TLB proving PTEs */
	/* sweep up TLB proving PTEs */
	av8l_fast_clear_stale_ptes(ops, base, max, false);
	av8l_fast_clear_stale_ptes(pmds, base, base, max, false);


	/* map the entire 4GB VA space with 16K map calls */
	/* map the entire 4GB VA space with 16K map calls */
	for (iova = base; iova < max; iova += SZ_16K) {
	for (iova = base; iova < max; iova += SZ_16K) {
@@ -780,7 +781,7 @@ static int __init av8l_fast_positive_testing(void)
	}
	}


	/* sweep up TLB proving PTEs */
	/* sweep up TLB proving PTEs */
	av8l_fast_clear_stale_ptes(ops, base, max, false);
	av8l_fast_clear_stale_ptes(pmds, base, base, max, false);


	/* map the entire 4GB VA space with 64K map calls */
	/* map the entire 4GB VA space with 64K map calls */
	for (iova = base; iova < max; iova += SZ_64K) {
	for (iova = base; iova < max; iova += SZ_64K) {
+3 −2
Original line number Original line Diff line number Diff line
@@ -79,8 +79,8 @@ av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops,
 */
 */
#define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0xa
#define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0xa


void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, u64 base, u64 end,
void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, u64 base,
				bool skip_sync);
				u64 start, u64 end, bool skip_sync);
void av8l_register_notify(struct notifier_block *nb);
void av8l_register_notify(struct notifier_block *nb);


#else  /* !CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */
#else  /* !CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */
@@ -89,6 +89,7 @@ void av8l_register_notify(struct notifier_block *nb);


static inline void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops,
static inline void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops,
					      u64 base,
					      u64 base,
					      u64 start,
					      u64 end,
					      u64 end,
					      bool skip_sync)
					      bool skip_sync)
{
{