
Commit 387a5dfc authored by Jordan Crouse

msm: kgsl: Avoid legacy GPU address / ID conflicts in mmap



It is possible to have a conflict in mmap between a legacy GPU object
mapped at a very low address and a GPU object mapped via ID.  Prevent
a conflict by only allowing legacy GPU address mappings <
KGSL_SVM_UPPER_BOUND while at the same time moving the lower bound of
the assigned GPU object IDs to (KGSL_SVM_UPPER_BOUND >> PAGE_SHIFT),
thereby ensuring that never the twain shall meet.
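
In mmap terms the fix simply partitions the page-offset space. A minimal
standalone sketch of that partition (illustrative only, not driver code;
the page shift and bound values below are assumed stand-ins for the real
PAGE_SHIFT and KGSL_SVM_UPPER_BOUND):

#include <stdbool.h>

/* Hypothetical example values, stand-ins for PAGE_SHIFT and
 * KGSL_SVM_UPPER_BOUND in the real driver. */
#define EX_PAGE_SHIFT      12
#define EX_SVM_UPPER_BOUND 0xC0000000UL
#define EX_GPUOBJ_ID_MIN   (EX_SVM_UPPER_BOUND >> EX_PAGE_SHIFT)

static bool pgoff_is_object_id(unsigned long pgoff)
{
	/* IDs are handed out starting at EX_GPUOBJ_ID_MIN, so any smaller
	 * offset must be a legacy GPU address (pgoff << EX_PAGE_SHIFT),
	 * which by construction stays below EX_SVM_UPPER_BOUND. */
	return pgoff >= EX_GPUOBJ_ID_MIN;
}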

This restriction means two things: 1) legacy GPU memory allocations
(IOCTL_KGSL_GPUMEM_ALLOC and older) need to be allocated from the user
GPU address region (< KGSL_SVM_UPPER_BOUND), and 2) external memory
that is mapped in the upper region (> KGSL_SVM_UPPER_BOUND) can no
longer be mmap()ed. The good news is that this has always been
implicitly true, but this change makes it explicit. To wit: imported
memory can never be mmap()ed, and legacy GPU memory allocations always
come from the lower address space.
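
For the ID-based path the userspace convention is unchanged; a hedged
sketch of a caller (assuming the kgsl UAPI header linux/msm_kgsl.h,
IOCTL_KGSL_GPUMEM_ALLOC_ID, 4K pages, and the usual device node; error
handling trimmed for brevity):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/msm_kgsl.h>	/* assumed UAPI header location */

int main(void)
{
	int fd = open("/dev/kgsl-3d0", O_RDWR);	/* assumed device node */
	struct kgsl_gpumem_alloc_id alloc = { .size = 4096 };

	if (fd < 0 || ioctl(fd, IOCTL_KGSL_GPUMEM_ALLOC_ID, &alloc) < 0)
		return 1;

	/* The object ID, shifted into a page offset (4K pages assumed),
	 * selects the buffer. After this patch every ID is at least
	 * KGSL_SVM_UPPER_BOUND >> PAGE_SHIFT, so this offset can never be
	 * confused with a legacy gpuaddr mapping below the bound. */
	void *buf = mmap(NULL, alloc.mmapsize, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, (off_t) alloc.id << 12);

	return buf == MAP_FAILED;
}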

Change-Id: Ic0dedbad98c2c014f522c1af2a664519a82c9506
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
parent 548e7c28
drivers/gpu/msm/kgsl.c  +22 −4
@@ -64,6 +64,13 @@
 #define KGSL_DMA_BIT_MASK	DMA_BIT_MASK(32)
 #endif
 
+/*
+ * To accommodate legacy GPU address mmapping we need to make sure that the GPU
+ * object won't conflict with the address space so define the IDs to start
+ * at the top of the user address space region
+ */
+#define KGSL_GPUOBJ_ID_MIN    (KGSL_SVM_UPPER_BOUND >> PAGE_SHIFT)
+
 /*
  * Define an kmem cache for the memobj structures since we allocate and free
  * them so frequently
@@ -410,7 +417,8 @@ kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
 		return -EBADF;
 	idr_preload(GFP_KERNEL);
 	spin_lock(&process->mem_lock);
-	id = idr_alloc(&process->mem_idr, entry, 1, 0, GFP_NOWAIT);
+	id = idr_alloc(&process->mem_idr, entry, KGSL_GPUOBJ_ID_MIN, 0,
+		GFP_NOWAIT);
 	spin_unlock(&process->mem_lock);
 	idr_preload_end();
 
@@ -3765,10 +3773,14 @@ get_mmap_entry(struct kgsl_process_private *private,
 	int ret = 0;
 	struct kgsl_mem_entry *entry;
 
-	entry = kgsl_sharedmem_find_id(private, pgoff);
-	if (entry == NULL) {
+	/*
+	 * GPU object IDs start at KGSL_SVM_UPPER_BOUND >> PAGE_SHIFT.  Anything
+	 * less is legacy GPU memory being mapped by address
+	 */
+	if (pgoff >= KGSL_GPUOBJ_ID_MIN)
+		entry = kgsl_sharedmem_find_id(private, pgoff);
+	else
 		entry = kgsl_sharedmem_find(private, pgoff << PAGE_SHIFT);
-	}
 
 	if (!entry)
 		return -EINVAL;
@@ -3780,6 +3792,12 @@ get_mmap_entry(struct kgsl_process_private *private,
 		goto err_put;
 	}
 
+	/* External memory cannot be mapped */
+	if ((KGSL_MEMFLAGS_USERMEM_MASK & entry->memdesc.flags) != 0) {
+		ret = -EINVAL;
+		goto err_put;
+	}
+
 	if (entry->memdesc.useraddr != 0) {
 		ret = -EBUSY;
 		goto err_put;
drivers/gpu/msm/kgsl_mmu.c  +65 −53
@@ -657,6 +657,27 @@ void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
 }
 EXPORT_SYMBOL(kgsl_mmu_putpagetable);
 
+static int _nommu_get_gpuaddr(struct kgsl_memdesc *memdesc)
+{
+	if (memdesc->sglen > 1) {
+		KGSL_CORE_ERR(
+			"Attempt to map non-contiguous memory with NOMMU\n");
+		return -EINVAL;
+	}
+
+	memdesc->gpuaddr = (uint64_t) sg_dma_address(memdesc->sg);
+
+	if (memdesc->gpuaddr == 0)
+		memdesc->gpuaddr = (uint64_t) sg_phys(memdesc->sg);
+
+	if (memdesc->gpuaddr == 0) {
+		KGSL_CORE_ERR("Unable to get a physical address\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /**
  * kgsl_mmu_get_gpuaddr - Assign a memdesc with a gpuadddr from the gen pool
  * @pagetable - pagetable whose pool is to be used
@@ -669,73 +690,64 @@ kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
 			struct kgsl_memdesc *memdesc)
 {
 	int size;
-	int page_align = ilog2(PAGE_SIZE);
+	unsigned long bit;
 
-	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
-		if (memdesc->sglen == 1) {
-			memdesc->gpuaddr = sg_dma_address(memdesc->sg);
-			if (!memdesc->gpuaddr)
-				memdesc->gpuaddr = sg_phys(memdesc->sg);
-			if (!memdesc->gpuaddr) {
-				KGSL_CORE_ERR("Unable to get a valid physical "
-					"address for memdesc\n");
-				return -EINVAL;
-			}
-			return 0;
-		} else {
-			KGSL_CORE_ERR("Memory is not contigious "
-					"(sglen = %d)\n", memdesc->sglen);
-			return -EINVAL;
-		}
-	}
+	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
+		return _nommu_get_gpuaddr(memdesc);
 
 	/* Add space for the guard page when allocating the mmu VA. */
 	size = memdesc->size;
 	if (kgsl_memdesc_has_guard_page(memdesc))
 		size += PAGE_SIZE;
 
-	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
-		/* Allocate aligned virtual addresses for iommu. This allows
-		 * more efficient pagetable entries if the physical memory
-		 * is also aligned.
-		 */
-		if (kgsl_memdesc_get_align(memdesc) > 0)
-			page_align = kgsl_memdesc_get_align(memdesc);
-	}
-
 	if (kgsl_memdesc_use_cpu_map(memdesc)) {
 		if (memdesc->gpuaddr == 0)
 			return -EINVAL;
 		bitmap_set(pagetable->mem_bitmap,
-				memdesc->gpuaddr >> PAGE_SHIFT,
-				size >> PAGE_SHIFT);
+			(int) (memdesc->gpuaddr >> PAGE_SHIFT),
+			(int) (size >> PAGE_SHIFT));
 		memdesc->priv |= KGSL_MEMDESC_BITMAP_ALLOC;
 		return 0;
 	}
 
-	if (KGSL_MEMFLAGS_USERMEM_MASK & memdesc->flags) {
-		memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool, size,
-						page_align);
-		if (memdesc->gpuaddr)
-			memdesc->priv |= KGSL_MEMDESC_GENPOOL_ALLOC;
-	} else {
-		unsigned int gpuaddr = bitmap_find_next_zero_area(
-				pagetable->mem_bitmap,
-				KGSL_SVM_UPPER_BOUND >> PAGE_SHIFT, 1,
-				size >> PAGE_SHIFT, 0);
-
-		if (gpuaddr < (KGSL_SVM_UPPER_BOUND >> PAGE_SHIFT)) {
-			bitmap_set(pagetable->mem_bitmap,
-				gpuaddr, size >> PAGE_SHIFT);
-			memdesc->gpuaddr = gpuaddr << PAGE_SHIFT;
-		}
-		if (memdesc->gpuaddr)
-			memdesc->priv |= KGSL_MEMDESC_BITMAP_ALLOC;
-	}
+	/*
+	 * Try to map external memory in the upper region first and then fall
+	 * back to user region if that fails.  All memory allocated by the user
+	 * goes into the user region first.
+	 */
+	if ((KGSL_MEMFLAGS_USERMEM_MASK & memdesc->flags) != 0) {
+		unsigned int page_align = ilog2(PAGE_SIZE);
+
+		/*
+		 * Allocate aligned virtual addresses for iommu. This allows
+		 * more efficient pagetable entries if the physical memory
+		 * is also aligned.
+		 */
+		if (kgsl_memdesc_get_align(memdesc) > 0)
+			page_align = kgsl_memdesc_get_align(memdesc);
+
+		memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
+			size, page_align);
+
+		if (memdesc->gpuaddr) {
+			memdesc->priv |= KGSL_MEMDESC_GENPOOL_ALLOC;
+			return 0;
+		}
+	}
+
+	bit = bitmap_find_next_zero_area(pagetable->mem_bitmap,
+		KGSL_SVM_UPPER_BOUND >> PAGE_SHIFT, 1,
+		(unsigned int) (size >> PAGE_SHIFT), 0);
+
+	if (bit && (bit < (KGSL_SVM_UPPER_BOUND >> PAGE_SHIFT))) {
+		bitmap_set(pagetable->mem_bitmap,
+			(int) bit, (int) (size >> PAGE_SHIFT));
+		memdesc->gpuaddr = (bit << PAGE_SHIFT);
+		memdesc->priv |= KGSL_MEMDESC_BITMAP_ALLOC;
+	}
 
-	if (memdesc->gpuaddr == 0)
-		return -ENOMEM;
-
-	return 0;
+	return (memdesc->gpuaddr == 0) ? -ENOMEM : 0;
 }
 EXPORT_SYMBOL(kgsl_mmu_get_gpuaddr);