Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0c294ef2 authored by Jordan Crouse, committed by Harshdeep Dhatt
Browse files

msm: kgsl: Add support for split pagetables



Attempt to enable split pagetables on the global and LPAC domains.
If successful we no longer need to map globals in all pagetables and we
don't need to manually return to the default pagetable when coming out
of slumber since all the globals will now live in the always on TTBR1
region.

Change-Id: Ic0dedbaddd7d05299d7e85d7b0bbc96b46dad10e
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Harshdeep Dhatt <hdhatt@codeaurora.org>
parent 3ba19ae9
Loading
Loading
Loading
Loading
+99 −12
Original line number Diff line number Diff line
@@ -25,10 +25,8 @@

#define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))

#define ADDR_IN_GLOBAL(_mmu, _a) \
	(((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu)) && \
	 ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) + \
	 KGSL_IOMMU_GLOBAL_MEM_SIZE)))

#define KGSL_IOMMU_SPLIT_TABLE_BASE 0x0001ff8000000000ULL

/*
 * Flag to set SMMU memory attributes required to
@@ -59,6 +57,22 @@ struct kgsl_iommu_addr_entry {

static struct kmem_cache *addr_entry_cache;

static bool kgsl_iommu_split_tables_enabled(struct kgsl_mmu *mmu)
{
	return (test_bit(KGSL_MMU_SPLIT_TABLES_GC, &mmu->features) &&
		test_bit(KGSL_MMU_SPLIT_TABLES_LPAC, &mmu->features));
}

/*
 * Return true if @addr falls in the global memory region. With split
 * pagetables the globals live at or above KGSL_IOMMU_SPLIT_TABLE_BASE
 * (the TTBR1 region); otherwise they occupy a fixed-size window starting
 * at KGSL_IOMMU_GLOBAL_MEM_BASE.
 */
static bool kgsl_iommu_addr_is_global(struct kgsl_mmu *mmu, u64 addr)
{
	u64 base;

	if (kgsl_iommu_split_tables_enabled(mmu))
		return addr >= KGSL_IOMMU_SPLIT_TABLE_BASE;

	base = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);

	if (addr < base)
		return false;

	return (addr - base) < KGSL_IOMMU_GLOBAL_MEM_SIZE;
}

static void __iomem *kgsl_iommu_reg(struct kgsl_iommu_context *ctx,
		u32 offset)
{
@@ -116,12 +130,22 @@ static u32 KGSL_IOMMU_GET_CTX_REG(struct kgsl_iommu_context *ctx, u32 offset)
	return readl_relaxed(addr);
}

static bool kgsl_iommu_is_global_pt(struct kgsl_pagetable *pt)
{
	return (pt->name == KGSL_MMU_GLOBAL_PT ||
		pt->name == KGSL_MMU_GLOBAL_LPAC_PT);
}

static void kgsl_iommu_unmap_globals(struct kgsl_mmu *mmu,
		struct kgsl_pagetable *pagetable)
{
	struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
	struct kgsl_global_memdesc *md;

	if (!kgsl_iommu_is_global_pt(pagetable)
		&& kgsl_iommu_split_tables_enabled(mmu))
		return;

	list_for_each_entry(md, &device->globals, node) {
		if (md->memdesc.flags & KGSL_MEMFLAGS_SECURE)
			continue;
@@ -136,6 +160,14 @@ static void kgsl_iommu_map_globals(struct kgsl_mmu *mmu,
	struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
	struct kgsl_global_memdesc *md;

	if (IS_ERR_OR_NULL(pagetable))
		return;

	if (!kgsl_iommu_is_global_pt(pagetable)
		&& kgsl_iommu_split_tables_enabled(mmu)) {
		return;
	}

	list_for_each_entry(md, &device->globals, node) {
		if (md->memdesc.flags & KGSL_MEMFLAGS_SECURE)
			continue;
@@ -229,13 +261,21 @@ static void kgsl_iommu_map_global(struct kgsl_mmu *mmu,
	}

	if (!memdesc->gpuaddr) {
		u64 offset = global_get_offset(device, memdesc->size,
		u64 offset;
		u64 base;

		offset = global_get_offset(device, memdesc->size,
			memdesc->priv);

		if (IS_ERR_VALUE(offset))
			return;

		memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + offset;
		if (kgsl_iommu_split_tables_enabled(mmu))
			base = KGSL_IOMMU_SPLIT_TABLE_BASE;
		else
			base = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);

		memdesc->gpuaddr = base + offset;
	}

	/*
@@ -243,7 +283,7 @@ static void kgsl_iommu_map_global(struct kgsl_mmu *mmu,
	 * been created since we do not go back and retroactively add the
	 * globals to existing pages
	 */
	WARN_ON(iommu->ppt_active);
	WARN_ON(!kgsl_iommu_split_tables_enabled(mmu) && iommu->ppt_active);

	kgsl_iommu_map_global_to_pt(mmu, memdesc, mmu->defaultpagetable);
	kgsl_iommu_map_global_to_pt(mmu, memdesc, mmu->lpac_pagetable);
@@ -284,6 +324,10 @@ static int _iommu_map_single_page(struct kgsl_pagetable *pt,
	int i;
	int ret = 0;

	/* Sign extend TTBR1 addresses all the way to avoid warning */
	if (gpuaddr & (1ULL << 48))
		gpuaddr |= 0xffff000000000000;

	for (i = 0; i < times; i++) {
		ret = iommu_map(iommu_pt->domain, gpuaddr + mapped,
				physaddr, PAGE_SIZE, flags);
@@ -311,6 +355,10 @@ static int _iommu_unmap(struct kgsl_pagetable *pt,
	struct kgsl_iommu_pt *iommu_pt = pt->priv;
	size_t unmapped = 0;

	/* Sign extend TTBR1 addresses all the way to avoid warning */
	if (addr & (1ULL << 48))
		addr |= 0xffff000000000000;

	unmapped = iommu_unmap(iommu_pt->domain, addr, size);

	if (unmapped != size) {
@@ -336,6 +384,10 @@ static int _iommu_map_sg_offset(struct kgsl_pagetable *pt,
	phys_addr_t physaddr;
	int ret;

	/* Sign extend TTBR1 addresses all the way to avoid warning */
	if (addr & (1ULL << 48))
		addr |= 0xffff000000000000;

	for_each_sg(sg, s, nents, i) {
		/* Iterate until we find the offset */
		if (offset_tmp >= s->length) {
@@ -391,6 +443,10 @@ static int _iommu_map_sg(struct kgsl_pagetable *pt,
	struct kgsl_iommu_pt *iommu_pt = pt->priv;
	size_t mapped;

	/* Sign extend TTBR1 addresses all the way to avoid warning */
	if (addr & (1ULL << 48))
		addr |= 0xffff000000000000;

	mapped = iommu_map_sg(iommu_pt->domain, addr, sg, nents, flags);

	if (mapped == 0) {
@@ -710,7 +766,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
			 * in the global region. These are rare and nobody needs
			 * to know the addresses that are in here
			 */
			if (ADDR_IN_GLOBAL(mmu, addr)) {
			if (kgsl_iommu_addr_is_global(mmu, addr)) {
				dev_err(ctx->kgsldev->dev, "Fault in global memory\n");
			} else {
				dev_err(ctx->kgsldev->dev,
@@ -1074,10 +1130,20 @@ static int set_smmu_aperture(struct kgsl_device *device, int cb_num)
	return ret;
}

static bool check_split_tables(struct kgsl_iommu_pt *iommu_pt)
{
	int val, ret;

	ret = iommu_domain_get_attr(iommu_pt->domain,
		DOMAIN_ATTR_SPLIT_TABLES, &val);

	return (!ret && val == 1);
}

static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
	struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
	int ret = 0;
	int ret = 0, val = 1;
	struct kgsl_iommu_pt *iommu_pt = NULL;
	unsigned int cb_num;
	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
@@ -1101,6 +1167,10 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)

	_enable_gpuhtw_llc(mmu, iommu_pt->domain);

	if (test_bit(KGSL_MMU_64BIT, &mmu->features))
		iommu_domain_set_attr(iommu_pt->domain,
			DOMAIN_ATTR_SPLIT_TABLES, &val);

	ret = _attach_pt(iommu_pt, ctx);
	if (ret)
		goto done;
@@ -1141,7 +1211,8 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
		goto done;
	}

	kgsl_iommu_map_globals(mmu, pt);
	if (check_split_tables(iommu_pt))
		set_bit(KGSL_MMU_SPLIT_TABLES_GC, &mmu->features);

done:
	if (ret)
@@ -1153,7 +1224,7 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
static int _init_global_lpac_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
	struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
	int ret = 0;
	int ret = 0, val = 1;
	struct kgsl_iommu_pt *iommu_pt = NULL;
	unsigned int cb_num;
	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
@@ -1166,6 +1237,8 @@ static int _init_global_lpac_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)

	_enable_gpuhtw_llc(mmu, iommu_pt->domain);

	iommu_domain_set_attr(iommu_pt->domain, DOMAIN_ATTR_SPLIT_TABLES, &val);

	ret = _attach_pt(iommu_pt, ctx);
	if (ret)
		goto done;
@@ -1194,7 +1267,8 @@ static int _init_global_lpac_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)

	ctx->cb_num = (int) cb_num;

	kgsl_iommu_map_globals(mmu, pt);
	if (check_split_tables(iommu_pt))
		set_bit(KGSL_MMU_SPLIT_TABLES_LPAC, &mmu->features);

done:
	if (ret)
@@ -1970,6 +2044,10 @@ static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
	unsigned int contextidr;
	unsigned long wait_for_flush;

	/* Not needed if split tables are enabled */
	if (kgsl_iommu_split_tables_enabled(mmu))
		return 0;

	if ((pt != mmu->defaultpagetable) && !kgsl_mmu_is_perprocess(mmu))
		return 0;

@@ -2622,6 +2700,15 @@ int kgsl_iommu_probe(struct kgsl_device *device)
	iommu_probe_secure_context(device, node);
	of_node_put(node);

	/*
	 * To preserve legacy behavior, make sure to map any globals that might
	 * have been allocated before the MMU probe. Do this after the probes
	 * so that we can be sure that split pagetable support is available
	 * (since both GC and LPAC need to enable it).
	 */
	kgsl_iommu_map_globals(mmu, mmu->defaultpagetable);
	kgsl_iommu_map_globals(mmu, mmu->lpac_pagetable);

	device->qdss_desc = kgsl_allocate_global_fixed(device,
		"qcom,gpu-qdss-stm", "gpu-qdss");

+6 −0
Original line number Diff line number Diff line
@@ -128,6 +128,12 @@ enum kgsl_mmu_feature {
	KGSL_MMU_LLCC_ENABLE,
	/** @KGSL_MMU_SMMU_APERTURE: Set the SMMU aperture */
	KGSL_MMU_SMMU_APERTURE,
	/** @KGSL_MMU_SPLIT_TABLES_GC: Split pagetables are enabled for GC */
	KGSL_MMU_SPLIT_TABLES_GC,
	/**
	 * @KGSL_MMU_SPLIT_TABLES_LPAC: Split pagetables are enabled for LPAC
	 */
	KGSL_MMU_SPLIT_TABLES_LPAC,
};

/**