
Commit 394ef640 authored by Michel Lespinasse, committed by Linus Torvalds

mm: use vm_unmapped_area() on arm architecture



Update the arm arch_get_unmapped_area[_topdown] functions to make use of
vm_unmapped_area() instead of implementing a brute force search.
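
With vm_unmapped_area(), the bottom-up case reduces to filling in a
struct vm_unmapped_area_info and handing the search to the common
helper. A condensed sketch of that pattern (identifiers are the ones
used in the hunks below; the explanatory comments are added here for
illustration only):

	struct vm_unmapped_area_info info;

	info.flags = 0;				/* bottom-up search */
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	/* keep shared mappings colour-aligned on aliasing VIPT caches */
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);

The top-down variant sets VM_UNMAPPED_AREA_TOPDOWN and, if that search
fails, retries bottom-up between mmap_base and TASK_SIZE.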

[akpm@linux-foundation.org: remove now-unused COLOUR_ALIGN_DOWN()]
Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b6661861
arch/arm/mm/mmap.c: +23 −109
@@ -11,18 +11,6 @@
 #include <linux/random.h>
 #include <asm/cachetype.h>
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-					      unsigned long pgoff)
-{
-	unsigned long base = addr & ~(SHMLBA-1);
-	unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA-1);
-
-	if (base + off <= addr)
-		return base + off;
-
-	return base - off;
-}
-
 #define COLOUR_ALIGN(addr,pgoff)		\
 	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
 	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
@@ -69,9 +57,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
 	int do_align = 0;
 	int aliasing = cache_is_vipt_aliasing();
+	struct vm_unmapped_area_info info;
 
 	/*
 	 * We only need to do colour alignment if either the I or D
@@ -104,46 +92,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	if (len > mm->cached_hole_size) {
-	        start_addr = addr = mm->free_area_cache;
-	} else {
-	        start_addr = addr = mm->mmap_base;
-	        mm->cached_hole_size = 0;
-	}
 
-full_search:
-	if (do_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(addr);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-		        mm->cached_hole_size = vma->vm_start - addr;
-		addr = vma->vm_end;
-		if (do_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long
@@ -156,6 +112,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	unsigned long addr = addr0;
 	int do_align = 0;
 	int aliasing = cache_is_vipt_aliasing();
+	struct vm_unmapped_area_info info;
 
 	/*
 	 * We only need to do colour alignment if either the I or D
@@ -187,70 +144,27 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache;
-	if (do_align) {
-		unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);
-		addr = base + len;
-	}
-
-	/* make sure it can fit in the remaining address space */
-	if (addr > len) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start)
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-	}
-
-	if (mm->mmap_base < len)
-		goto bottomup;
-
-	addr = mm->mmap_base - len;
-	if (do_align)
-		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (!vma || addr+len <= vma->vm_start)
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start - len;
-		if (do_align)
-			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-	} while (len < vma->vm_start);
-
-bottomup:
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
+
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = mm->mmap_base;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }