
Commit 50d5ac3d authored by Jordan Crouse, committed by John Stultz

FROMLIST: drm/msm/a6xx: Use the DMA API for GMU memory objects



The GMU has very few memory allocations and uses a flat memory space, so
there is no good reason to go out of our way to bypass the DMA APIs, which
were designed for exactly this scenario.
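
For context, here is a minimal sketch (not part of the patch) of the
write-combine DMA allocation pattern the driver moves to, mirroring the new
a6xx_gmu_memory_alloc()/a6xx_gmu_memory_free(); struct demo_bo, demo_alloc()
and demo_free() are hypothetical stand-ins for the real GMU types:

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct demo_bo {
	void *virt;
	size_t size;
	dma_addr_t iova;
	unsigned long attrs;
};

static struct demo_bo *demo_alloc(struct device *dev, size_t size)
{
	struct demo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);
	bo->attrs = DMA_ATTR_WRITE_COMBINE;

	/*
	 * One call takes the place of the old alloc_page() + iommu_map() +
	 * vmap() sequence; the DMA layer picks the IOVA.
	 */
	bo->virt = dma_alloc_attrs(dev, bo->size, &bo->iova, GFP_KERNEL,
		bo->attrs);
	if (!bo->virt) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	return bo;
}

static void demo_free(struct device *dev, struct demo_bo *bo)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	/* Pairs with dma_alloc_attrs(): unmaps and frees in one call */
	dma_free_attrs(dev, bo->size, bo->virt, bo->iova, bo->attrs);
	kfree(bo);
}

DMA_ATTR_WRITE_COMBINE requests a write-combined CPU mapping, matching the
pgprot_writecombine() vmap() that the old hand-rolled path used.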

v2: Pass force_dma=false to of_dma_configure() to require that the DMA
region be set up, and propagate the error from of_dma_configure() so that
probe fails.
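
As a minimal sketch of the v2 behavior (demo_probe() is a hypothetical
wrapper, not from the patch): with force_dma == false, of_dma_configure()
does not force a DMA setup when the device tree does not describe one, and
any error it returns is propagated so probe fails cleanly:

#include <linux/device.h>
#include <linux/of_device.h>

static int demo_probe(struct device *dev, struct device_node *node)
{
	int ret;

	/*
	 * force_dma == false: require the DT to describe the DMA region
	 * and fail probe if of_dma_configure() reports an error.
	 */
	ret = of_dma_configure(dev, node, false);
	if (ret)
		return ret;

	return 0;
}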

Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Link: https://lore.kernel.org/lkml/1582223216-23459-5-git-send-email-jcrouse@codeaurora.org/
Change-Id: I4868cc684c5201d75e8bdba2bb4e2f949794f98d
parent c709c16b
drivers/gpu/drm/msm/adreno/a6xx_gmu.c  +10 −102
@@ -2,6 +2,7 @@
 /* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

 #include <linux/clk.h>
+#include <linux/dma-mapping.h>
 #include <linux/interconnect.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_opp.h>
@@ -877,21 +878,10 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)

 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
 {
-	int count, i;
-	u64 iova;
-
 	if (IS_ERR_OR_NULL(bo))
 		return;

-	count = bo->size >> PAGE_SHIFT;
-	iova = bo->iova;
-
-	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
-		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
-		__free_pages(bo->pages[i], 0);
-	}
-
-	kfree(bo->pages);
+	dma_free_attrs(gmu->dev, bo->size, bo->virt, bo->iova, bo->attrs);
 	kfree(bo);
 }

@@ -899,94 +889,23 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
 		size_t size)
 {
 	struct a6xx_gmu_bo *bo;
-	int ret, count, i;

 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 	if (!bo)
 		return ERR_PTR(-ENOMEM);

 	bo->size = PAGE_ALIGN(size);
+	bo->attrs = DMA_ATTR_WRITE_COMBINE;

-	count = bo->size >> PAGE_SHIFT;
+	bo->virt = dma_alloc_attrs(gmu->dev, bo->size, &bo->iova, GFP_KERNEL,
+		bo->attrs);

-	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
-	if (!bo->pages) {
+	if (!bo->virt) {
 		kfree(bo);
 		return ERR_PTR(-ENOMEM);
 	}

-	for (i = 0; i < count; i++) {
-		bo->pages[i] = alloc_page(GFP_KERNEL);
-		if (!bo->pages[i])
-			goto err;
-	}
-
-	bo->iova = gmu->uncached_iova_base;
-
-	for (i = 0; i < count; i++) {
-		ret = iommu_map(gmu->domain,
-			bo->iova + (PAGE_SIZE * i),
-			page_to_phys(bo->pages[i]), PAGE_SIZE,
-			IOMMU_READ | IOMMU_WRITE);
-
-		if (ret) {
-			DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
-
-			for (i = i - 1 ; i >= 0; i--)
-				iommu_unmap(gmu->domain,
-					bo->iova + (PAGE_SIZE * i),
-					PAGE_SIZE);
-
-			goto err;
-		}
-	}
-
-	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
-		pgprot_writecombine(PAGE_KERNEL));
-	if (!bo->virt)
-		goto err;
-
-	/* Align future IOVA addresses on 1MB boundaries */
-	gmu->uncached_iova_base += ALIGN(size, SZ_1M);
-
 	return bo;
-
-err:
-	for (i = 0; i < count; i++) {
-		if (bo->pages[i])
-			__free_pages(bo->pages[i], 0);
-	}
-
-	kfree(bo->pages);
-	kfree(bo);
-
-	return ERR_PTR(-ENOMEM);
 }

-static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
-{
-	int ret;
-
-	/*
-	 * The GMU address space is hardcoded to treat the range
-	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
-	 * between the GMU and the CPU will live in this space
-	 */
-	gmu->uncached_iova_base = 0x60000000;
-
-
-	gmu->domain = iommu_domain_alloc(&platform_bus_type);
-	if (!gmu->domain)
-		return -ENODEV;
-
-	ret = iommu_attach_device(gmu->domain, gmu->dev);
-
-	if (ret) {
-		iommu_domain_free(gmu->domain);
-		gmu->domain = NULL;
-	}
-
-	return ret;
-}
-
 /* Return the 'arc-level' for the given frequency */
@@ -1244,10 +1163,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)

 	a6xx_gmu_memory_free(gmu, gmu->hfi);

-	iommu_detach_device(gmu->domain, gmu->dev);
-
-	iommu_domain_free(gmu->domain);
-
 	free_irq(gmu->gmu_irq, gmu);
 	free_irq(gmu->hfi_irq, gmu);

@@ -1268,7 +1183,10 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)

 	gmu->dev = &pdev->dev;

-	of_dma_configure(gmu->dev, node, true);
+	/* Pass force_dma false to require the DT to set the dma region */
+	ret = of_dma_configure(gmu->dev, node, false);
+	if (ret)
+		return ret;

 	/* Fow now, don't do anything fancy until we get our feet under us */
 	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
@@ -1280,11 +1198,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	if (ret)
 		goto err_put_device;

-	/* Set up the IOMMU context bank */
-	ret = a6xx_gmu_memory_probe(gmu);
-	if (ret)
-		goto err_put_device;
-
 	/* Allocate memory for for the HFI queues */
 	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
 	if (IS_ERR(gmu->hfi))
@@ -1330,11 +1243,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 err_memory:
 	a6xx_gmu_memory_free(gmu, gmu->hfi);

-	if (gmu->domain) {
-		iommu_detach_device(gmu->domain, gmu->dev);
-
-		iommu_domain_free(gmu->domain);
-	}
 	ret = -ENODEV;

 err_put_device:
drivers/gpu/drm/msm/adreno/a6xx_gmu.h  +1 −4
@@ -13,7 +13,7 @@ struct a6xx_gmu_bo {
 	void *virt;
 	size_t size;
 	u64 iova;
-	struct page **pages;
+	unsigned long attrs;
 };

 /*
@@ -49,9 +49,6 @@ struct a6xx_gmu {
 	int hfi_irq;
 	int gmu_irq;

-	struct iommu_domain *domain;
-	u64 uncached_iova_base;
-
 	struct device *gxpd;

 	int idle_level;