Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fe882fa3 authored by Shubhraprakash Das's avatar Shubhraprakash Das
Browse files

msm: kgsl: Optimize the function to search for free address range



kgsl_get_unmapped_area looks for a free address range that does
not collide in the GPU map as well as the system map for the process.
Optimize this function by directly calling the functions called within
get_unmapped_area. This also gives better control when looking for
address ranges that have a higher chance of success.

CRs-fixed: 562285
Change-Id: I530670b1753464f435d0c697a0ddaa8675a4e01d
Signed-off-by: default avatarShubhraprakash Das <sadas@codeaurora.org>
parent 76ccdc74
Loading
Loading
Loading
Loading
+226 −47
Original line number Diff line number Diff line
@@ -29,6 +29,7 @@
#include <mach/socinfo.h>
#include <linux/mman.h>
#include <linux/sort.h>
#include <linux/security.h>
#include <asm/cacheflush.h>

#include "kgsl.h"
@@ -1181,6 +1182,8 @@ kgsl_sharedmem_find(struct kgsl_process_private *private, unsigned int gpuaddr)
 * @private: private data for the process to check.
 * @gpuaddr: start address of the region
 * @size: length of the region.
 * @collision_entry: Returns pointer to the colliding memory entry,
 * caller's responsibility to take a refcount on this entry
 *
 * Checks that there are no existing allocations within an address
 * region. This function should be called with processes spin lock
@@ -1188,7 +1191,8 @@ kgsl_sharedmem_find(struct kgsl_process_private *private, unsigned int gpuaddr)
 */
static int
kgsl_sharedmem_region_empty(struct kgsl_process_private *private,
	unsigned int gpuaddr, size_t size)
	unsigned int gpuaddr, size_t size,
	struct kgsl_mem_entry **collision_entry)
{
	int result = 1;
	unsigned int gpuaddr_end = gpuaddr + size;
@@ -1220,6 +1224,8 @@ kgsl_sharedmem_region_empty(struct kgsl_process_private *private,
		else if (memdesc_end <= gpuaddr)
			node = node->rb_right;
		else {
			if (collision_entry)
				*collision_entry = entry;
			result = 0;
			break;
		}
@@ -3563,6 +3569,105 @@ mmap_range_valid(unsigned long addr, unsigned long len)
	return ((ULONG_MAX - addr) > len) && ((addr + len) < TASK_SIZE);
}

/**
 * kgsl_check_gpu_addr_collision() - Check if an address range collides with
 * existing allocations of a process
 * @private: Pointer to process private
 * @entry: Memory entry of the memory for which address range is being
 * considered
 * @addr: Start address of the address range for which collision is checked
 * @len: Length of the address range
 * @gpumap_free_addr: The lowest address from where to look for a free address
 * range because addresses below this are known to conflict. May be NULL, in
 * which case no alternative address is searched for on collision.
 * @flag_top_down: Indicates whether to search for an unmapped region in top
 * down or bottom up mode
 * @align: log2 of the alignment requirement of the unmapped region
 *
 * If [@addr, @addr + @len) is free in the GPU map, the range is claimed for
 * @entry and mapped while holding the process memory lock. Otherwise, when
 * @gpumap_free_addr is non-NULL, this function walks the GPU map from the
 * colliding entry (top down or bottom up per @flag_top_down) until it finds
 * a gap large enough for @len plus alignment padding, and returns that
 * suggested start address through @gpumap_free_addr. The caller is expected
 * to check the suggested address against the CPU map and retry.
 *
 * Return: 0 on success (range claimed and mapped); -EAGAIN if the range
 * collided (a suggested address may have been stored in @gpumap_free_addr);
 * -ENOENT or -EOVERFLOW if the search for a free gap failed; or an error
 * code from kgsl_mmu_map() if mapping the claimed range failed.
 */
static int kgsl_check_gpu_addr_collision(
				struct kgsl_process_private *private,
				struct kgsl_mem_entry *entry,
				unsigned long addr, unsigned long len,
				unsigned long *gpumap_free_addr,
				bool flag_top_down,
				unsigned int align)
{
	/* -EAGAIN tells the caller the address collided and a retry with a
	 * different hint may succeed */
	int ret = -EAGAIN;
	struct kgsl_mem_entry *collision_entry = NULL;
	spin_lock(&private->mem_lock);
	if (kgsl_sharedmem_region_empty(private, addr, len, &collision_entry)) {
		/*
		 * We found a free memory map, claim it here with
		 * memory lock held
		 */
		entry->memdesc.gpuaddr = addr;
		/* This should never fail */
		ret = kgsl_mem_entry_track_gpuaddr(private, entry);
		spin_unlock(&private->mem_lock);
		BUG_ON(ret);
		/* map cannot be called with lock held */
		ret = kgsl_mmu_map(private->pagetable,
					&entry->memdesc);
		if (ret) {
			/* mapping failed: release the claimed range again */
			spin_lock(&private->mem_lock);
			kgsl_mem_entry_untrack_gpuaddr(private, entry);
			spin_unlock(&private->mem_lock);
		}
	} else {
		trace_kgsl_mem_unmapped_area_collision(entry, addr, len,
							ret);
		/* No suggestion requested: just report the collision */
		if (!gpumap_free_addr) {
			spin_unlock(&private->mem_lock);
			return ret;
		}
		/*
		 * When checking for a free gap make sure the gap is large
		 * enough to accommodate alignment padding
		 */
		len += 1 << align;

		/*
		 * Loop through the gpu map address space to find an unmapped
		 * region of size len, looping is done either top down or
		 * bottom up based on flag_top_down setting
		 */
		do {
			/* No colliding neighbor to step past: nothing left
			 * to try */
			if (!collision_entry) {
				ret = -ENOENT;
				break;
			}
			if (flag_top_down) {
				/* try the gap just below the colliding
				 * allocation */
				addr = collision_entry->memdesc.gpuaddr - len;
				/* underflow check: subtraction wrapped past 0 */
				if (addr > collision_entry->memdesc.gpuaddr) {
					ret = -EOVERFLOW;
					break;
				}
			} else {
				/* try the gap just above the colliding
				 * allocation */
				addr = collision_entry->memdesc.gpuaddr +
					kgsl_memdesc_mmapsize(
						&collision_entry->memdesc);
				/* overflow check */
				if (addr < collision_entry->memdesc.gpuaddr) {
					ret = -EOVERFLOW;
					break;
				}
			}
			collision_entry = NULL;
			/* empty region found: hand the suggestion back to the
			 * caller (ret stays -EAGAIN so the caller revalidates
			 * the address against the CPU map) */
			if (kgsl_sharedmem_region_empty(private, addr, len,
							&collision_entry)) {
				*gpumap_free_addr = addr;
				break;
			}
		} while (1);
		spin_unlock(&private->mem_lock);
	}
	return ret;
}

static unsigned long
kgsl_get_unmapped_area(struct file *file, unsigned long addr,
			unsigned long len, unsigned long pgoff,
@@ -3576,6 +3681,11 @@ kgsl_get_unmapped_area(struct file *file, unsigned long addr,
	struct kgsl_mem_entry *entry = NULL;
	unsigned int align;
	unsigned int retry = 0;
	struct vm_area_struct *vma;
	int ret_val;
	unsigned long gpumap_free_addr = 0;
	bool flag_top_down = true;
	struct vm_unmapped_area_info info;

	if (vma_offset == device->memstore.gpuaddr)
		return get_unmapped_area(NULL, addr, len, pgoff, flags);
@@ -3584,6 +3694,9 @@ kgsl_get_unmapped_area(struct file *file, unsigned long addr,
	if (ret)
		return ret;

	ret = arch_mmap_check(addr, len, flags);
	if (ret)
		goto put;
	/*
	 * If we're not going to use CPU map feature, get an ordinary mapping
	 * with nothing more to be done.
@@ -3599,6 +3712,17 @@ kgsl_get_unmapped_area(struct file *file, unsigned long addr,
		ret = -EBUSY;
		goto put;
	}
	/* special case handling for MAP_FIXED */
	if (flags & MAP_FIXED) {
		ret = get_unmapped_area(NULL, addr, len, pgoff, flags);
		if (!ret || IS_ERR_VALUE(ret))
			goto put;
		ret_val = kgsl_check_gpu_addr_collision(private, entry,
					addr, len, 0, 0, 0);
		if (ret_val)
			ret = ret_val;
		goto put;
	}

	align = kgsl_memdesc_get_align(&entry->memdesc);
	if (align >= ilog2(SZ_1M))
@@ -3613,60 +3737,115 @@ kgsl_get_unmapped_area(struct file *file, unsigned long addr,

	if (!mmap_range_valid(addr, len))
		addr = 0;

	/*
	 * first try to see if the suggested address is accepted by the
	 * system map and our gpu map
	 */
	if (addr) {
		vma = find_vma(current->mm, addr);
		if (!vma || ((addr + len) <= vma->vm_start)) {

			if (align)
				ret = ALIGN(addr, (1 << align));

			ret_val = kgsl_check_gpu_addr_collision(private,
				entry, ret, orig_len, NULL, 0, 0);

			if (!ret_val) {
				/* success */
				goto put;
			} else if (((ret_val < 0) && (ret_val != -EAGAIN))) {
				ret = ret_val;
				goto put;
			}
		}
	}
	addr = current->mm->mmap_base;
	info.length = orig_len;
	info.align_mask = ((1 << align) - 1);
	info.align_offset = 0;
	/*
	 * Loop through the address space to find a address region agreeable to
	 * both system map and gpu map
	 */
	do {
		ret = get_unmapped_area(NULL, addr, len, pgoff, flags);
		if (IS_ERR_VALUE(ret)) {
		if (retry) {
			/*
			 * try the bottom up approach if top down failed
			 */
			if (flag_top_down) {
				flag_top_down = false;
				addr = TASK_UNMAPPED_BASE;
				gpumap_free_addr = 0;
				ret = 0;
				retry = 0;
				continue;
			}
			/*
			 * If we are really fragmented, there may not be room
			 * for the alignment padding, so try again without it.
			 * If we are already doing bottom up with alignment,
			 * then retry without alignment.
			 */
			if (!retry && (ret == (unsigned long)-ENOMEM)
				&& (align > PAGE_SHIFT)) {
			if (align) {
				align = 0;
				addr = 0;
				flag_top_down = true;
				addr = current->mm->mmap_base;
				gpumap_free_addr = 0;
				len = orig_len;
				retry = 1;
				ret = 0;
				retry = 0;
				info.align_mask = 0;
				continue;
			}
			/*
			 * Out of options future targets may have more address
			 * bits, for now fail
			 */
			break;
		}
		if (align)
			ret = ALIGN(ret, (1 << align));
		if (gpumap_free_addr)
			addr = gpumap_free_addr;
		if (flag_top_down) {
			info.flags = VM_UNMAPPED_AREA_TOPDOWN;
			info.low_limit = PAGE_SIZE;
			info.high_limit = addr;
		} else {
			info.flags = 0;
			info.low_limit = addr;
			info.high_limit = TASK_SIZE;
		}
		ret = vm_unmapped_area(&info);

		if (ret == (unsigned long)-ENOMEM) {
			retry = 1;
			continue;
		} else if (!ret || (~PAGE_MASK & ret)) {
			ret = -EBUSY;
			retry = 1;
			continue;
		} else if (IS_ERR_VALUE(ret)) {
			break;
		} else {
			unsigned long temp = ret;
			ret = security_mmap_addr(ret);
			if (ret) {
				retry = 1;
				continue;
			}
			ret = temp;
		}

		/* make sure there isn't a GPU only mapping at this address */
		spin_lock(&private->mem_lock);
		if (kgsl_sharedmem_region_empty(private, ret, orig_len)) {
			int ret_val;
			/*
			 * We found a free memory map, claim it here with
			 * memory lock held
			 */
			entry->memdesc.gpuaddr = ret;
			/* This should never fail */
			ret_val = kgsl_mem_entry_track_gpuaddr(private, entry);
			spin_unlock(&private->mem_lock);
			BUG_ON(ret_val);
			/* map cannot be called with lock held */
			ret_val = kgsl_mmu_map(private->pagetable,
						&entry->memdesc);
			if (ret_val) {
				spin_lock(&private->mem_lock);
				kgsl_mem_entry_untrack_gpuaddr(private, entry);
				spin_unlock(&private->mem_lock);
		ret_val = kgsl_check_gpu_addr_collision(private, entry, ret,
						orig_len, &gpumap_free_addr,
						flag_top_down, align);
		if (!ret_val) {
			/* success */
			break;
		} else if ((ret_val < 0) && (ret_val != -EAGAIN)) {
			ret = ret_val;
			}
			break;
		}
		spin_unlock(&private->mem_lock);

		trace_kgsl_mem_unmapped_area_collision(entry, addr, orig_len,
							ret);

		/*
		 * If we collided, bump the hint address so that
		 * get_unmapped_area knows to look somewhere else.
		 */
		addr = (addr == 0) ? ret + orig_len : addr + orig_len;

		/*
		 * The addr hint can be set by userspace to be near
@@ -3674,19 +3853,19 @@ kgsl_get_unmapped_area(struct file *file, unsigned long addr,
		 * the whole address space at least once by wrapping
		 * back around once.
		 */
		if (!retry && !mmap_range_valid(addr, len)) {
			addr = 0;
		if (!mmap_range_valid(addr, len) ||
			!mmap_range_valid(gpumap_free_addr, len)) {
			retry = 1;
		} else {
			ret = -EBUSY;
			continue;
		}
	} while (!(flags & MAP_FIXED) && mmap_range_valid(addr, len));
	} while (mmap_range_valid(addr, orig_len));

put:
	if (IS_ERR_VALUE(ret))
		KGSL_MEM_ERR(device,
				"pid %d pgoff %lx len %ld failed error %ld\n",
				private->pid, pgoff, len, ret);
put:
	kgsl_mem_entry_put(entry);
	return ret;
}