
Commit b6709083 authored by Lucas Stach

drm/etnaviv: remove IOMMU dependency



Using the IOMMU API to manage the internal GPU MMU has been a
historical accident, and it keeps getting in the way while
entangling the driver with the inner workings of the IOMMU
subsystem.

Clean this up by removing the usage of iommu_domain, which is the
last piece linking etnaviv to the IOMMU subsystem.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
parent 27d38062
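
The patch swaps the embedded struct iommu_domain for a small driver-internal abstraction. For orientation, here is a minimal sketch of that abstraction, reconstructed from how it is used in the hunks below; the defining header is not part of this page, so exact field order and any additional members are assumptions.

struct etnaviv_iommu_domain_ops;

struct etnaviv_iommu_domain {
	struct device *dev;		/* device backing the DMA allocations */
	void *bad_page_cpu;		/* scratch page catching stray GPU accesses */
	dma_addr_t bad_page_dma;
	u64 base;			/* start of the GPU virtual address space */
	u64 size;			/* size of the GPU virtual address space */
	const struct etnaviv_iommu_domain_ops *ops;
};

struct etnaviv_iommu_domain_ops {
	void (*free)(struct etnaviv_iommu_domain *domain);
	int (*map)(struct etnaviv_iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	size_t (*unmap)(struct etnaviv_iommu_domain *domain,
			unsigned long iova, size_t size);
	size_t (*dump_size)(struct etnaviv_iommu_domain *domain);
	void (*dump)(struct etnaviv_iommu_domain *domain, void *buf);
};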
+0 −2
@@ -7,8 +7,6 @@ config DRM_ETNAVIV
 	select SHMEM
 	select SYNC_FILE
 	select TMPFS
-	select IOMMU_API
-	select IOMMU_SUPPORT
 	select WANT_DEV_COREDUMP
 	select CMA if HAVE_DMA_CONTIGUOUS
 	select DMA_CMA if HAVE_DMA_CONTIGUOUS
+0 −1
@@ -26,7 +26,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/list.h>
-#include <linux/iommu.h>
 #include <linux/types.h>
 #include <linux/sizes.h>
 
+67 −71
@@ -14,7 +14,6 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/iommu.h>
 #include <linux/platform_device.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
@@ -31,127 +30,115 @@
 
 #define GPU_MEM_START	0x80000000
 
-struct etnaviv_iommu_domain_pgtable {
-	u32 *pgtable;
-	dma_addr_t paddr;
+struct etnaviv_iommuv1_domain {
+	struct etnaviv_iommu_domain base;
+	u32 *pgtable_cpu;
+	dma_addr_t pgtable_dma;
 };
 
-struct etnaviv_iommu_domain {
-	struct iommu_domain domain;
-	struct device *dev;
-	void *bad_page_cpu;
-	dma_addr_t bad_page_dma;
-	struct etnaviv_iommu_domain_pgtable pgtable;
-};
-
-static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain)
+static struct etnaviv_iommuv1_domain *
+to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
 {
-	return container_of(domain, struct etnaviv_iommu_domain, domain);
+	return container_of(domain, struct etnaviv_iommuv1_domain, base);
 }
 
-static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
+static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
 {
 	u32 *p;
 	int i;
 
-	etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
-						  SZ_4K,
-						  &etnaviv_domain->bad_page_dma,
-						  GFP_KERNEL);
-	if (!etnaviv_domain->bad_page_cpu)
+	etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
+						etnaviv_domain->base.dev,
+						SZ_4K,
+						&etnaviv_domain->base.bad_page_dma,
+						GFP_KERNEL);
+	if (!etnaviv_domain->base.bad_page_cpu)
 		return -ENOMEM;
 
-	p = etnaviv_domain->bad_page_cpu;
+	p = etnaviv_domain->base.bad_page_cpu;
 	for (i = 0; i < SZ_4K / 4; i++)
 		*p++ = 0xdead55aa;
 
-	etnaviv_domain->pgtable.pgtable =
-			dma_alloc_coherent(etnaviv_domain->dev, PT_SIZE,
-					   &etnaviv_domain->pgtable.paddr,
+	etnaviv_domain->pgtable_cpu =
+			dma_alloc_coherent(etnaviv_domain->base.dev, PT_SIZE,
					   &etnaviv_domain->pgtable_dma,
 					   GFP_KERNEL);
-	if (!etnaviv_domain->pgtable.pgtable) {
-		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
-				  etnaviv_domain->bad_page_cpu,
-				  etnaviv_domain->bad_page_dma);
+	if (!etnaviv_domain->pgtable_cpu) {
+		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+				  etnaviv_domain->base.bad_page_cpu,
+				  etnaviv_domain->base.bad_page_dma);
 		return -ENOMEM;
 	}
 
 	for (i = 0; i < PT_ENTRIES; i++)
-		etnaviv_domain->pgtable.pgtable[i] =
-			etnaviv_domain->bad_page_dma;
+		etnaviv_domain->pgtable_cpu[i] =
+				etnaviv_domain->base.bad_page_dma;
 
 	return 0;
 }
 
-static void etnaviv_domain_free(struct iommu_domain *domain)
+static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
 {
-	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+	struct etnaviv_iommuv1_domain *etnaviv_domain =
+			to_etnaviv_domain(domain);
 
-	dma_free_coherent(etnaviv_domain->dev, PT_SIZE,
-			  etnaviv_domain->pgtable.pgtable,
-			  etnaviv_domain->pgtable.paddr);
+	dma_free_coherent(etnaviv_domain->base.dev, PT_SIZE,
+			  etnaviv_domain->pgtable_cpu,
+			  etnaviv_domain->pgtable_dma);
 
-	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
-			  etnaviv_domain->bad_page_cpu,
-			  etnaviv_domain->bad_page_dma);
+	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+			  etnaviv_domain->base.bad_page_cpu,
+			  etnaviv_domain->base.bad_page_dma);
 
 	kfree(etnaviv_domain);
 }
 
-static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova,
-	   phys_addr_t paddr, size_t size, int prot)
+static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
+			       unsigned long iova, phys_addr_t paddr,
+			       size_t size, int prot)
 {
-	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+	struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
 	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
 
 	if (size != SZ_4K)
 		return -EINVAL;
 
-	etnaviv_domain->pgtable.pgtable[index] = paddr;
+	etnaviv_domain->pgtable_cpu[index] = paddr;
 
 	return 0;
 }
 
-static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain,
+static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
 	unsigned long iova, size_t size)
 {
-	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+	struct etnaviv_iommuv1_domain *etnaviv_domain =
+			to_etnaviv_domain(domain);
 	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
 
 	if (size != SZ_4K)
 		return -EINVAL;
 
-	etnaviv_domain->pgtable.pgtable[index] = etnaviv_domain->bad_page_dma;
+	etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;
 
 	return SZ_4K;
 }
 
-static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain)
+static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
 {
 	return PT_SIZE;
 }
 
-static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
+static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
 {
-	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+	struct etnaviv_iommuv1_domain *etnaviv_domain =
+			to_etnaviv_domain(domain);
 
-	memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
+	memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
 }
 
-static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
-	.ops = {
-		.domain_free = etnaviv_domain_free,
-		.map = etnaviv_iommuv1_map,
-		.unmap = etnaviv_iommuv1_unmap,
-		.pgsize_bitmap = SZ_4K,
-	},
-	.dump_size = etnaviv_iommuv1_dump_size,
-	.dump = etnaviv_iommuv1_dump,
-};
-
 void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
 {
-	struct etnaviv_iommu_domain *etnaviv_domain =
+	struct etnaviv_iommuv1_domain *etnaviv_domain =
 			to_etnaviv_domain(gpu->mmu->domain);
 	u32 pgtable;
 
@@ -163,7 +150,7 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
 	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
 
 	/* set page table address in MC */
-	pgtable = (u32)etnaviv_domain->pgtable.paddr;
+	pgtable = (u32)etnaviv_domain->pgtable_dma;
 
 	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
 	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
@@ -172,28 +159,37 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
 	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
 }
 
-struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
+const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
+	.free = etnaviv_iommuv1_domain_free,
+	.map = etnaviv_iommuv1_map,
+	.unmap = etnaviv_iommuv1_unmap,
+	.dump_size = etnaviv_iommuv1_dump_size,
+	.dump = etnaviv_iommuv1_dump,
+};
+
+struct etnaviv_iommu_domain *
+etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
 {
-	struct etnaviv_iommu_domain *etnaviv_domain;
+	struct etnaviv_iommuv1_domain *etnaviv_domain;
+	struct etnaviv_iommu_domain *domain;
 	int ret;
 
 	etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
 	if (!etnaviv_domain)
 		return NULL;
 
-	etnaviv_domain->dev = gpu->dev;
+	domain = &etnaviv_domain->base;
 
-	etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
-	etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
-	etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
-	etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
-	etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
+	domain->dev = gpu->dev;
+	domain->base = GPU_MEM_START;
+	domain->size = PT_ENTRIES * SZ_4K;
+	domain->ops = &etnaviv_iommuv1_ops;
 
 	ret = __etnaviv_iommu_init(etnaviv_domain);
 	if (ret)
 		goto out_free;
 
-	return &etnaviv_domain->domain;
+	return &etnaviv_domain->base;
 
 out_free:
 	kfree(etnaviv_domain);
+5 −2
@@ -18,11 +18,14 @@
 #define __ETNAVIV_IOMMU_H__
 
 struct etnaviv_gpu;
+struct etnaviv_iommu_domain;
 
-struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
+struct etnaviv_iommu_domain *
+etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
 void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu);
 
-struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
+struct etnaviv_iommu_domain *
+etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
 void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu);
 
 #endif /* __ETNAVIV_IOMMU_H__ */
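
With these prototypes returning the driver's own domain type, callers dispatch through domain->ops instead of the iommu_map()/iommu_unmap() API, and protection bits come from driver-local ETNAVIV_PROT_* flags rather than IOMMU_* (see the IOMMU_WRITE to ETNAVIV_PROT_WRITE change below). The MMU core that consumes this interface is not shown on this page, so the following call-site sketch is illustrative only:

/* Illustrative sketch, not code from this commit: mapping one page
 * and tearing a domain down through the new ops table. */
static int example_map_one_page(struct etnaviv_iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr,
				int prot)
{
	/* both the v1 and v2 backends reject sizes other than SZ_4K */
	return domain->ops->map(domain, iova, paddr, SZ_4K, prot);
}

static void example_domain_put(struct etnaviv_iommu_domain *domain)
{
	/* replaces iommu_domain_free(); the backend frees its own tables */
	domain->ops->free(domain);
}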
+52 −54
@@ -14,7 +14,6 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/iommu.h>
 #include <linux/platform_device.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
@@ -40,10 +39,7 @@
 #define MMUv2_MAX_STLB_ENTRIES		1024
 
 struct etnaviv_iommuv2_domain {
-	struct iommu_domain domain;
-	struct device *dev;
-	void *bad_page_cpu;
-	dma_addr_t bad_page_dma;
+	struct etnaviv_iommu_domain base;
 	/* M(aster) TLB aka first level pagetable */
 	u32 *mtlb_cpu;
 	dma_addr_t mtlb_dma;
@@ -52,13 +48,15 @@ struct etnaviv_iommuv2_domain {
 	dma_addr_t stlb_dma[1024];
 };
 
-static struct etnaviv_iommuv2_domain *to_etnaviv_domain(struct iommu_domain *domain)
+static struct etnaviv_iommuv2_domain *
+to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
 {
-	return container_of(domain, struct etnaviv_iommuv2_domain, domain);
+	return container_of(domain, struct etnaviv_iommuv2_domain, base);
 }
 
-static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
-	   phys_addr_t paddr, size_t size, int prot)
+static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
+			       unsigned long iova, phys_addr_t paddr,
+			       size_t size, int prot)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
@@ -68,7 +66,7 @@ static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
 	if (size != SZ_4K)
 		return -EINVAL;
 
-	if (prot & IOMMU_WRITE)
+	if (prot & ETNAVIV_PROT_WRITE)
 		entry |= MMUv2_PTE_WRITEABLE;
 
 	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
@@ -79,7 +77,7 @@ static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
 	return 0;
 }
 
-static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
+static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
 				    unsigned long iova, size_t size)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
@@ -103,19 +101,20 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 	int ret, i, j;
 
 	/* allocate scratch page */
-	etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
-						  SZ_4K,
-						  &etnaviv_domain->bad_page_dma,
-						  GFP_KERNEL);
-	if (!etnaviv_domain->bad_page_cpu) {
+	etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
+						etnaviv_domain->base.dev,
+						SZ_4K,
+						&etnaviv_domain->base.bad_page_dma,
+						GFP_KERNEL);
+	if (!etnaviv_domain->base.bad_page_cpu) {
 		ret = -ENOMEM;
 		goto fail_mem;
 	}
-	p = etnaviv_domain->bad_page_cpu;
+	p = etnaviv_domain->base.bad_page_cpu;
 	for (i = 0; i < SZ_4K / 4; i++)
 		*p++ = 0xdead55aa;
 
-	etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev,
+	etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
 						  SZ_4K,
 						  &etnaviv_domain->mtlb_dma,
 						  GFP_KERNEL);
@@ -127,7 +126,7 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 	/* pre-populate STLB pages (may want to switch to on-demand later) */
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		etnaviv_domain->stlb_cpu[i] =
-				dma_alloc_coherent(etnaviv_domain->dev,
+				dma_alloc_coherent(etnaviv_domain->base.dev,
 						   SZ_4K,
 						   &etnaviv_domain->stlb_dma[i],
 						   GFP_KERNEL);
@@ -146,19 +145,19 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 	return 0;
 
 fail_mem:
-	if (etnaviv_domain->bad_page_cpu)
-		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
-				  etnaviv_domain->bad_page_cpu,
-				  etnaviv_domain->bad_page_dma);
+	if (etnaviv_domain->base.bad_page_cpu)
+		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+				  etnaviv_domain->base.bad_page_cpu,
+				  etnaviv_domain->base.bad_page_dma);
 
 	if (etnaviv_domain->mtlb_cpu)
-		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
 				  etnaviv_domain->mtlb_cpu,
 				  etnaviv_domain->mtlb_dma);
 
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		if (etnaviv_domain->stlb_cpu[i])
-			dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+			dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
 					  etnaviv_domain->stlb_cpu[i],
 					  etnaviv_domain->stlb_dma[i]);
 	}
@@ -166,23 +165,23 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 	return ret;
 }
 
-static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
+static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
 	int i;
 
-	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
-			  etnaviv_domain->bad_page_cpu,
-			  etnaviv_domain->bad_page_dma);
+	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+			  etnaviv_domain->base.bad_page_cpu,
+			  etnaviv_domain->base.bad_page_dma);
 
-	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
 			  etnaviv_domain->mtlb_cpu,
 			  etnaviv_domain->mtlb_dma);
 
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		if (etnaviv_domain->stlb_cpu[i])
-			dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+			dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
 					  etnaviv_domain->stlb_cpu[i],
 					  etnaviv_domain->stlb_dma[i]);
 	}
@@ -190,7 +189,7 @@ static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
 	vfree(etnaviv_domain);
 }
 
-static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
+static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
@@ -204,7 +203,7 @@ static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
 	return dump_size;
 }
 
-static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
+static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
@@ -217,17 +216,6 @@ static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
 			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
 }
 
-static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
-	.ops = {
-		.domain_free = etnaviv_iommuv2_domain_free,
-		.map = etnaviv_iommuv2_map,
-		.unmap = etnaviv_iommuv2_unmap,
-		.pgsize_bitmap = SZ_4K,
-	},
-	.dump_size = etnaviv_iommuv2_dump_size,
-	.dump = etnaviv_iommuv2_dump,
-};
-
 void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
@@ -240,35 +228,45 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
 
 	prefetch = etnaviv_buffer_config_mmuv2(gpu,
 				(u32)etnaviv_domain->mtlb_dma,
-				(u32)etnaviv_domain->bad_page_dma);
+				(u32)etnaviv_domain->base.bad_page_dma);
 	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
 			     prefetch);
 	etnaviv_gpu_wait_idle(gpu, 100);
 
 	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
 }
-struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
+
+const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
+	.free = etnaviv_iommuv2_domain_free,
+	.map = etnaviv_iommuv2_map,
+	.unmap = etnaviv_iommuv2_unmap,
+	.dump_size = etnaviv_iommuv2_dump_size,
+	.dump = etnaviv_iommuv2_dump,
+};
+
+struct etnaviv_iommu_domain *
+etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain;
+	struct etnaviv_iommu_domain *domain;
 	int ret;
 
 	etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
 	if (!etnaviv_domain)
 		return NULL;
 
-	etnaviv_domain->dev = gpu->dev;
+	domain = &etnaviv_domain->base;
 
-	etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
-	etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
-	etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
-	etnaviv_domain->domain.geometry.aperture_start = 0;
-	etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);
+	domain->dev = gpu->dev;
+	domain->base = 0;
+	domain->size = (u64)SZ_1G * 4;
+	domain->ops = &etnaviv_iommuv2_ops;
 
 	ret = etnaviv_iommuv2_init(etnaviv_domain);
 	if (ret)
 		goto out_free;
 
-	return &etnaviv_domain->domain;
+	return &etnaviv_domain->base;
 
 out_free:
 	vfree(etnaviv_domain);