Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3b82217a authored by Jordan Crouse's avatar Jordan Crouse
Browse files

msm: kgsl: Move the GMU kernel IOMMU device to the GMU platform device



In order to make devlink happy we need to bind an IOMMU device on the
primary GMU platform device.  Since we don't need the GMU user context
yet, the decision is easy: move the GMU kernel IOMMU context to the
platform device itself instead of to a child device.

Change-Id: Ic0dedbad082c57b2aeb6a35573ead8535f3b8ab9
Signed-off-by: default avatarJordan Crouse <jcrouse@codeaurora.org>
parent 7b54b39b
Loading
Loading
Loading
Loading
+22 −113
Original line number Diff line number Diff line
@@ -24,12 +24,6 @@
#include "kgsl_trace.h"
#include "kgsl_util.h"

struct gmu_iommu_context {
	const char *name;
	struct platform_device *pdev;
	struct iommu_domain *domain;
};

#define ARC_VOTE_GET_PRI(_v) ((_v) & 0xFF)
#define ARC_VOTE_GET_SEC(_v) (((_v) >> 8) & 0xFF)
#define ARC_VOTE_GET_VLVL(_v) (((_v) >> 16) & 0xFFFF)
@@ -61,11 +55,6 @@ static struct gmu_vma_entry a6xx_gmu_vma_legacy[] = {
			.size = SZ_512M,
			.next_va = 0x60000000
		},
	[GMU_NONCACHED_USER] = {
			.start = 0x80000000,
			.size = SZ_1G,
			.next_va = 0x80000000
		},
};

static struct gmu_vma_entry a6xx_gmu_vma[] = {
@@ -91,16 +80,6 @@ static struct gmu_vma_entry a6xx_gmu_vma[] = {
			.size = SZ_512M,
			.next_va = 0x60000000
		},
	[GMU_NONCACHED_USER] = {
			.start = 0x80000000,
			.size = SZ_1G,
			.next_va = 0x80000000
		},
};

/* The two GMU IOMMU contexts, indexed by enum gmu_context_index */
static struct gmu_iommu_context a6xx_gmu_ctx[] = {
	[GMU_CONTEXT_USER] = { .name = "gmu_user" },
	[GMU_CONTEXT_KERNEL] = { .name = "gmu_kernel" }
};

static int timed_poll_check_rscc(struct kgsl_device *device,
@@ -619,17 +598,6 @@ static int find_vma_block(struct a6xx_gmu_device *gmu, u32 addr, u32 size)
	return -ENOENT;
}

/*
 * a6xx_get_gmu_domain - Pick the IOMMU domain for a GMU memory block
 * @gmu: Pointer to the a6xx GMU device
 * @gmuaddr: GMU virtual address of the block
 * @size: Size of the block in bytes
 *
 * Only blocks that land in the non-cached user VMA map through the user
 * context; every other block maps through the kernel context.
 *
 * NOTE(review): find_vma_block() can return -ENOENT; stored in a u32 it
 * never equals GMU_NONCACHED_USER, so failed lookups silently fall back
 * to the kernel domain — confirm that is intentional.
 *
 * Return: the iommu_domain the block should be mapped into
 */
struct iommu_domain *a6xx_get_gmu_domain(struct a6xx_gmu_device *gmu,
	u32 gmuaddr, u32 size)
{
	u32 block = find_vma_block(gmu, gmuaddr, size);
	int idx;

	idx = (block == GMU_NONCACHED_USER) ?
		GMU_CONTEXT_USER : GMU_CONTEXT_KERNEL;

	return a6xx_gmu_ctx[idx].domain;
}

static int _load_legacy_gmu_fw(struct kgsl_device *device,
	struct a6xx_gmu_device *gmu)
{
@@ -1420,7 +1388,7 @@ struct gmu_memdesc *reserve_gmu_kernel_block(struct a6xx_gmu_device *gmu,

	md->gmuaddr = addr;

	ret = iommu_map(a6xx_get_gmu_domain(gmu, md->gmuaddr, md->size), addr,
	ret = iommu_map(gmu->domain, addr,
		md->physaddr, md->size, IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
	if (ret) {
		dev_err(&gmu->pdev->dev,
@@ -2419,18 +2387,6 @@ static int a6xx_gmu_bus_set(struct adreno_device *adreno_dev, int buslevel,
	return ret;
}

/*
 * a6xx_gmu_iommu_cb_close - Tear down one GMU IOMMU context bank
 * @ctx: Context to close; ctx->domain == NULL means already closed
 *
 * Detaches the context's device from its IOMMU domain, frees the domain,
 * drops the reference held on the context's platform device, and clears
 * ctx->domain so a repeat call is a no-op.
 */
static void a6xx_gmu_iommu_cb_close(struct gmu_iommu_context *ctx)
{
	struct iommu_domain *domain = ctx->domain;

	if (domain) {
		iommu_detach_device(domain, &ctx->pdev->dev);
		iommu_domain_free(domain);
		platform_device_put(ctx->pdev);
		ctx->domain = NULL;
	}
}

static void a6xx_free_gmu_globals(struct a6xx_gmu_device *gmu)
{
	int i;
@@ -2441,7 +2397,7 @@ static void a6xx_free_gmu_globals(struct a6xx_gmu_device *gmu)
		if (!md->gmuaddr)
			continue;

		iommu_unmap(a6xx_get_gmu_domain(gmu, md->gmuaddr, md->size),
		iommu_unmap(gmu->domain,
			md->gmuaddr, md->size);

		dma_free_attrs(&gmu->pdev->dev, (size_t) md->size,
@@ -2450,8 +2406,11 @@ static void a6xx_free_gmu_globals(struct a6xx_gmu_device *gmu)
		memset(md, 0, sizeof(*md));
	}

	a6xx_gmu_iommu_cb_close(&a6xx_gmu_ctx[GMU_CONTEXT_KERNEL]);
	a6xx_gmu_iommu_cb_close(&a6xx_gmu_ctx[GMU_CONTEXT_USER]);
	if (gmu->domain) {
		iommu_detach_device(gmu->domain, &gmu->pdev->dev);
		iommu_domain_free(gmu->domain);
		gmu->domain = NULL;
	}

	gmu->global_entries = 0;
}
@@ -2587,8 +2546,7 @@ void a6xx_gmu_remove(struct kgsl_device *device)
}

static int a6xx_gmu_iommu_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long addr, int flags, void *token,
		const char *name)
		struct device *dev, unsigned long addr, int flags, void *token)
{
	char *fault_type = "unknown";

@@ -2601,55 +2559,22 @@ static int a6xx_gmu_iommu_fault_handler(struct iommu_domain *domain,
	else if (flags & IOMMU_FAULT_TRANSACTION_STALLED)
		fault_type = "transaction stalled";

	dev_err(dev, "GMU fault addr = %lX, context=%s (%s %s fault)\n",
			addr, name,
	dev_err(dev, "GMU fault addr = %lX, context=kernel (%s %s fault)\n",
			addr,
			(flags & IOMMU_FAULT_WRITE) ? "write" : "read",
			fault_type);

	return 0;
}

static int a6xx_gmu_kernel_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long addr, int flags, void *token)
static int a6xx_gmu_iommu_init(struct a6xx_gmu_device *gmu)
{
	return a6xx_gmu_iommu_fault_handler(domain, dev, addr, flags, token,
		"gmu_kernel");
}

static int a6xx_gmu_user_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long addr, int flags, void *token)
{
	return a6xx_gmu_iommu_fault_handler(domain, dev, addr, flags, token,
		"gmu_user");
}

static int a6xx_gmu_iommu_cb_probe(struct a6xx_gmu_device *gmu,
		struct gmu_iommu_context *ctx, struct device_node *parent,
		iommu_fault_handler_t handler)
{
	struct device_node *node = of_get_child_by_name(parent, ctx->name);
	struct platform_device *pdev;
	int ret;
	int no_stall = 1;

	if (!node)
		return -ENODEV;

	pdev = of_find_device_by_node(node);
	ret = of_dma_configure(&pdev->dev, node, true);
	of_node_put(node);

	if (ret) {
		platform_device_put(pdev);
		return ret;
	}

	ctx->pdev = pdev;
	ctx->domain = iommu_domain_alloc(&platform_bus_type);
	if (ctx->domain == NULL) {
		dev_err(&gmu->pdev->dev, "gmu iommu fail to alloc %s domain\n",
			ctx->name);
		platform_device_put(pdev);
	gmu->domain = iommu_domain_alloc(&platform_bus_type);
	if (gmu->domain == NULL) {
		dev_err(&gmu->pdev->dev, "Unable to allocate GMU IOMMU domain\n");
		return -ENODEV;
	}

@@ -2658,38 +2583,22 @@ static int a6xx_gmu_iommu_cb_probe(struct a6xx_gmu_device *gmu,
	 * This sets SCTLR.CFCFG = 0.
	 * Also note that, the smmu driver sets SCTLR.HUPCF = 0 by default.
	 */
	iommu_domain_set_attr(ctx->domain,
	iommu_domain_set_attr(gmu->domain,
		DOMAIN_ATTR_FAULT_MODEL_NO_STALL, &no_stall);

	ret = iommu_attach_device(ctx->domain, &pdev->dev);
	ret = iommu_attach_device(gmu->domain, &gmu->pdev->dev);
	if (!ret) {
		iommu_set_fault_handler(ctx->domain, handler, ctx);
		iommu_set_fault_handler(gmu->domain,
			a6xx_gmu_iommu_fault_handler, gmu);
		return 0;
	}

	dev_err(&gmu->pdev->dev,
		"gmu iommu fail to attach %s device\n", ctx->name);
	iommu_domain_free(ctx->domain);
	ctx->domain = NULL;
	platform_device_put(pdev);

	return ret;
}

static int a6xx_gmu_iommu_init(struct a6xx_gmu_device *gmu,
		struct device_node *node)
{
	int ret;

	devm_of_platform_populate(&gmu->pdev->dev);
		"Unable to attach GMU IOMMU domain: %d\n", ret);
	iommu_domain_free(gmu->domain);
	gmu->domain = NULL;

	ret = a6xx_gmu_iommu_cb_probe(gmu, &a6xx_gmu_ctx[GMU_CONTEXT_USER],
			node, a6xx_gmu_user_fault_handler);
	if (ret)
	return ret;

	return a6xx_gmu_iommu_cb_probe(gmu, &a6xx_gmu_ctx[GMU_CONTEXT_KERNEL],
			node, a6xx_gmu_kernel_fault_handler);
}

int a6xx_gmu_probe(struct kgsl_device *device,
@@ -2729,7 +2638,7 @@ int a6xx_gmu_probe(struct kgsl_device *device,
	gmu->num_clks = ret;

	/* Set up GMU IOMMU and shared memory with GMU */
	ret = a6xx_gmu_iommu_init(gmu, pdev->dev.of_node);
	ret = a6xx_gmu_iommu_init(gmu);
	if (ret)
		goto error;

+2 −18
Original line number Diff line number Diff line
@@ -91,11 +91,6 @@ enum gmu_mem_type {
	GMU_MEM_TYPE_MAX,
};

enum gmu_context_index {
	GMU_CONTEXT_USER,
	GMU_CONTEXT_KERNEL,
};

/**
 * struct gmu_memdesc - Gmu shared memory object descriptor
 * @hostptr: Kernel virtual address
@@ -204,6 +199,8 @@ struct a6xx_gmu_device {
	unsigned long flags;
	/** @rscc_virt: Pointer where RSCC block is mapped */
	void __iomem *rscc_virt;
	/** @domain: IOMMU domain for the kernel context */
	struct iommu_domain *domain;
};

/* Helper function to get to a6xx gmu device from adreno device */
@@ -517,17 +514,4 @@ int a6xx_gmu_enable_clks(struct adreno_device *adreno_dev);
 */
int a6xx_gmu_enable_gdsc(struct adreno_device *adreno_dev);

/**
 * a6xx_get_gmu_domain - Get the gmu iommu domain for a gmu memory block
 * @gmu: Pointer to the a6xx gmu device
 * @gmuaddr: Address of the memory block
 * @size: Size in bytes of the memory block
 *
 * Based on the gmu address and size of a gmu memory block, get the gmu iommu
 * domain to map the memory block to.
 *
 * Return: gmu iommu domain to which the given memory block is to be mapped
 */
struct iommu_domain *a6xx_get_gmu_domain(struct a6xx_gmu_device *gmu,
	u32 gmuaddr, u32 size);
#endif
+1 −2
Original line number Diff line number Diff line
@@ -431,8 +431,7 @@ static int gmu_import_buffer(struct adreno_device *adreno_dev,

	desc->gmu_addr = vma->next_va;

	mapped = iommu_map_sg(a6xx_get_gmu_domain(gmu, desc->gmu_addr,
			desc->size),
	mapped = iommu_map_sg(gmu->domain,
			desc->gmu_addr, sgt->sgl, sgt->nents, attrs);
	if (mapped == 0)
		dev_err(&gmu->pdev->dev, "gmu map sg err: 0x%08x, %d, %x, %zd\n",