Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e6a0ad6b authored by Jordan Crouse
Browse files

msm: kgsl: Reorganize the shared memory APIs



Despite being designed to be flexible, the current GPU shared memory API
ended up being a jumble of forks and conditionals. Redesign the API to
have three outward facing functions:

kgsl_allocate_user
kgsl_allocate_kernel
kgsl_allocate_global

The three APIs are built on each other:

kgsl_allocate_user() allocates three kinds of memory depending on the
system characteristics.

kgsl_allocate_kernel() allocates the same memory and maps it automatically
in the kernel space.

kgsl_allocate_global() allocates kernel memory and then adds it to the
global buffer list.

Internally the individual memory allocation functions use memdesc ops to
have unique map and free functions. This will make it easier to later omit
a specific type of memory without having to deal with a lot of #ifdefs and
code changes.

Change-Id: Ic0dedbad6c99a45d2e441097563f67f7e1e983cb
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
parent b0ee18e3
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -165,7 +165,7 @@ static int a5xx_critical_packet_construct(struct adreno_device *adreno_dev)
		return ret;

	ret = kgsl_allocate_user(&adreno_dev->dev, &crit_pkts_refbuf0,
		PAGE_SIZE, KGSL_MEMFLAGS_SECURE);
		PAGE_SIZE, KGSL_MEMFLAGS_SECURE, 0);
	if (ret)
		return ret;

+1 −1
Original line number Diff line number Diff line
@@ -619,7 +619,7 @@ static int a6xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,

	ret = kgsl_allocate_user(device, &rb->secure_preemption_desc,
		A6XX_CP_CTXRECORD_SIZE_IN_BYTES,
		KGSL_MEMFLAGS_SECURE | KGSL_MEMDESC_PRIVILEGED);
		KGSL_MEMFLAGS_SECURE, KGSL_MEMDESC_PRIVILEGED);
	if (ret)
		return ret;

+2 −2
Original line number Diff line number Diff line
@@ -3410,7 +3410,7 @@ struct kgsl_mem_entry *gpumem_alloc_entry(
		return ERR_PTR(-ENOMEM);

	ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
		size, flags);
		size, flags, 0);
	if (ret != 0)
		goto err;

@@ -3631,7 +3631,7 @@ long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
			KGSL_MEMALIGN_MASK);

	ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
			param->size, flags);
			param->size, flags, 0);
	if (ret)
		goto err_remove_idr;

+2 −6
Original line number Diff line number Diff line
@@ -182,14 +182,10 @@ struct kgsl_memdesc_ops {
#define KGSL_MEMDESC_SECURE BIT(4)
/* Memory is accessible in privileged mode */
#define KGSL_MEMDESC_PRIVILEGED BIT(6)
/* The memdesc is TZ locked content protection */
#define KGSL_MEMDESC_TZ_LOCKED BIT(7)
/* The memdesc is allocated through contiguous memory */
#define KGSL_MEMDESC_CONTIG BIT(8)
/* This is an instruction buffer */
#define KGSL_MEMDESC_UCODE BIT(9)
#define KGSL_MEMDESC_UCODE BIT(7)
/* For global buffers, randomly assign an address from the region */
#define KGSL_MEMDESC_RANDOM BIT(10)
#define KGSL_MEMDESC_RANDOM BIT(8)

/**
 * struct kgsl_memdesc - GPU memory object descriptor
+2 −21
Original line number Diff line number Diff line
@@ -1421,24 +1421,6 @@ static void kgsl_iommu_close(struct kgsl_mmu *mmu)
	kgsl_cleanup_qtimer_desc(mmu);
}

/*
 * _setstate_alloc - allocate the IOMMU "setstate" scratch buffer.
 * @device: KGSL device the buffer belongs to
 * @iommu:  IOMMU instance whose setstate memdesc is filled in
 *
 * Initializes iommu->setstate and backs it with one page of
 * contiguous shared memory. On success the page is marked
 * GPU-read-only and cleared to zero.
 *
 * Return: 0 on success, or the error code from
 * kgsl_sharedmem_alloc_contig() on failure.
 */
static int _setstate_alloc(struct kgsl_device *device,
		struct kgsl_iommu *iommu)
{
	int ret;

	/* Reset the memdesc before handing it to the allocator */
	kgsl_memdesc_init(device, &iommu->setstate, 0);
	ret = kgsl_sharedmem_alloc_contig(device, &iommu->setstate, PAGE_SIZE);

	if (!ret) {
		/* Mark the setstate memory as read only */
		iommu->setstate.flags |= KGSL_MEMFLAGS_GPUREADONLY;

		/* Zero the whole page so the GPU never sees stale data */
		kgsl_sharedmem_set(device, &iommu->setstate, 0, 0, PAGE_SIZE);
	}

	return ret;
}

static int kgsl_iommu_init(struct kgsl_mmu *mmu)
{
	struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
@@ -1454,9 +1436,8 @@ static int kgsl_iommu_init(struct kgsl_mmu *mmu)
		return -EINVAL;
	}

	status = _setstate_alloc(device, iommu);
	if (status)
		return status;
	status = kgsl_allocate_kernel(device, &iommu->setstate, PAGE_SIZE,
		KGSL_MEMFLAGS_GPUREADONLY, 0);

	iommu->regbase = ioremap(iommu->regstart, iommu->regsize);
	if (iommu->regbase == NULL) {
Loading