
Commit 6054ea92 authored by Jordan Crouse

msm: kgsl: Revert the GPU address / ID conflict fix



Partial revert of 387a5dfc. The IDs are now big enough that 32-bit user
space ends up sign-extending the mmap offset and causing all sorts of
confusion in the kernel. This is a partial revert because the previous
change also had a rather critical fix to avoid OOM conditions, and we
would rather keep that around.

Change-Id: Ic0dedbad4a5ac8f35d97da91b92f6d3d2951403d
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
parent acdce027
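
To make the failure mode concrete, the sketch below reproduces the sign extension the message describes. It is an illustrative stand-alone program, not driver code: PAGE_SHIFT = 12 and the 0xC0000000 value used for KGSL_SVM_UPPER_BOUND are assumptions chosen only to show the effect; the real bound depends on the target's user address space layout.

#include <stdint.h>
#include <stdio.h>

/* Assumed values for illustration only. */
#define PAGE_SHIFT		12
#define KGSL_SVM_UPPER_BOUND	0xC0000000U
#define KGSL_GPUOBJ_ID_MIN	(KGSL_SVM_UPPER_BOUND >> PAGE_SHIFT)

int main(void)
{
	uint32_t id = KGSL_GPUOBJ_ID_MIN;	/* first ID idr_alloc() would hand out */

	/* A 32-bit process that keeps the mmap offset in a signed 32-bit type
	 * sees a negative number, because id << PAGE_SHIFT sets the top bit...
	 */
	int32_t off32 = (int32_t)(id << PAGE_SHIFT);

	/* ...which sign-extends when widened to the kernel's 64-bit offset, so
	 * the page offset the kernel recovers no longer matches the object ID.
	 */
	int64_t off64 = off32;
	uint64_t pgoff = (uint64_t)off64 >> PAGE_SHIFT;

	printf("id    = %#x\n", (unsigned int)id);              /* 0xc0000 */
	printf("pgoff = %#llx\n", (unsigned long long)pgoff);   /* 0xffffffffc0000 */
	return 0;
}

With object IDs starting at 1 again, the offsets stay far below 2 GB, so the sign bit is never set in the first place.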
+3 −22
@@ -64,13 +64,6 @@
 #define KGSL_DMA_BIT_MASK	DMA_BIT_MASK(32)
 #endif
 
-/*
- * To accommodate legacy GPU address mmapping we need to make sure that the GPU
- * object won't conflict with the address space so define the IDs to start
- * at the top of the user address space region
- */
-#define KGSL_GPUOBJ_ID_MIN    (KGSL_SVM_UPPER_BOUND >> PAGE_SHIFT)
-
 /*
  * Define an kmem cache for the memobj structures since we allocate and free
  * them so frequently
@@ -417,8 +410,7 @@ kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
 		return -EBADF;
 	idr_preload(GFP_KERNEL);
 	spin_lock(&process->mem_lock);
-	id = idr_alloc(&process->mem_idr, entry, KGSL_GPUOBJ_ID_MIN, 0,
-		GFP_NOWAIT);
+	id = idr_alloc(&process->mem_idr, entry, 1, 0, GFP_NOWAIT);
 	spin_unlock(&process->mem_lock);
 	idr_preload_end();
 
@@ -3773,13 +3765,8 @@ get_mmap_entry(struct kgsl_process_private *private,
 	int ret = 0;
 	struct kgsl_mem_entry *entry;
 
-	/*
-	 * GPU object IDs start at KGSL_SVM_UPPER_BOUND >> PAGE_SHIFT.  Anything
-	 * less is legacy GPU memory being mapped by address
-	 */
-	if (pgoff >= KGSL_GPUOBJ_ID_MIN)
-		entry = kgsl_sharedmem_find_id(private, pgoff);
-	else
+	entry = kgsl_sharedmem_find_id(private, pgoff);
+	if (entry == NULL)
 		entry = kgsl_sharedmem_find(private, pgoff << PAGE_SHIFT);
 
 	if (!entry)
@@ -3792,12 +3779,6 @@
 		goto err_put;
 	}
 
-	/* External memory cannot be mapped */
-	if ((KGSL_MEMFLAGS_USERMEM_MASK & entry->memdesc.flags) != 0) {
-		ret = -EINVAL;
-		goto err_put;
-	}
-
 	if (entry->memdesc.useraddr != 0) {
 		ret = -EBUSY;
 		goto err_put;
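
For reference, the offset in question is the one user space builds when it maps a GPU buffer object by ID; kgsl_sharedmem_find_id() above looks the object up by that page offset. The fragment below is a hypothetical user-space sketch assuming 4 KB pages, with kgsl_fd, id and size standing in as placeholders for values obtained from the device and its allocation ioctl.

#include <sys/types.h>
#include <sys/mman.h>

/* Hypothetical user-space helper, assuming 4 KB pages: the object ID is
 * passed to the kernel as the mmap page offset. With IDs starting at 1
 * again, (off_t)id << 12 stays small enough to survive a signed 32-bit
 * off_t without sign extension.
 */
static void *map_gpuobj(int kgsl_fd, unsigned int id, size_t size)
{
	off_t offset = (off_t)id << 12;

	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    kgsl_fd, offset);
}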