
Commit dd529596 authored by Michel Lespinasse, committed by Linus Torvalds

mm: use vm_unmapped_area() in hugetlbfs on tile architecture



Update the tile hugetlb_get_unmapped_area function to make use of
vm_unmapped_area() instead of implementing a brute force search.

[akpm@linux-foundation.org: fix build]
Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a046be3d
+25 −114
@@ -231,42 +231,15 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr;
-
-	if (len > mm->cached_hole_size) {
-		start_addr = mm->free_area_cache;
-	} else {
-		start_addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
-	}
+	struct vm_unmapped_area_info info;
 
-full_search:
-	addr = ALIGN(start_addr, huge_page_size(h));
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = ALIGN(vma->vm_end, huge_page_size(h));
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
@@ -274,92 +247,30 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma, *prev_vma;
-	unsigned long base = mm->mmap_base, addr = addr0;
-	unsigned long largest_hole = mm->cached_hole_size;
-	int first_time = 1;
-
-	/* don't allow allocations above current base */
-	if (mm->free_area_cache > base)
-		mm->free_area_cache = base;
-
-	if (len <= largest_hole) {
-		largest_hole = 0;
-		mm->free_area_cache  = base;
-	}
-try_again:
-	/* make sure it can fit in the remaining address space */
-	if (mm->free_area_cache < len)
-		goto fail;
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = (mm->free_area_cache - len) & huge_page_mask(h);
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * i.e. return with success:
-		 */
-		vma = find_vma_prev(mm, addr, &prev_vma);
-		if (!vma) {
-			return addr;
-			break;
-		}
-
-		/*
-		 * new region fits between prev_vma->vm_end and
-		 * vma->vm_start, use it:
-		 */
-		if (addr + len <= vma->vm_start &&
-			    (!prev_vma || (addr >= prev_vma->vm_end))) {
-			/* remember the address as a hint for next time */
-			mm->cached_hole_size = largest_hole;
-			mm->free_area_cache = addr;
-			return addr;
-		} else {
-			/* pull free_area_cache down to the first hole */
-			if (mm->free_area_cache == vma->vm_end) {
-				mm->free_area_cache = vma->vm_start;
-				mm->cached_hole_size = largest_hole;
-			}
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + largest_hole < vma->vm_start)
-			largest_hole = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start - len) & huge_page_mask(h);
-	} while (len <= vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 */
-	if (first_time) {
-		mm->free_area_cache = base;
-		largest_hole = 0;
-		first_time = 0;
-		goto try_again;
-	}
+	struct vm_unmapped_area_info info;
+	unsigned long addr;
+
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = current->mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
 
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
	 */
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	mm->cached_hole_size = ~0UL;
-	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
-			len, pgoff, flags);
-
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
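
For readers who have not used the new helper: the patch reduces both search functions to filling in a struct vm_unmapped_area_info and calling vm_unmapped_area(). Below is a minimal sketch of the top-down path with its bottom-up fallback, restated outside the diff. The function name and the reduced argument list are illustrative only, and the usual in-kernel context (TASK_UNMAPPED_BASE, TASK_SIZE, the hstate helpers) is assumed; this is not the exact code of the patch.

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

/* Illustrative sketch only; mirrors the top-down path added by this patch. */
static unsigned long example_hugetlb_unmapped_area_topdown(struct file *file,
							    unsigned long len)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;	/* search down from mmap_base */
	info.length = len;			/* size of the requested mapping */
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h); /* huge-page align */
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/* On failure addr holds -ENOMEM (not page aligned); retry bottom-up. */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;			/* bottom-up this time */
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}
	return addr;
}

The align_mask of PAGE_MASK & ~huge_page_mask(h) is what makes vm_unmapped_area() return a huge-page-aligned address, and a failed top-down search returns an error value that is not page aligned, which is what the addr & ~PAGE_MASK test detects before widening the window and retrying bottom-up.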