
Commit fbfef902 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm: Switch some TASK_SIZE checks to use mm_context addr_limit

parent 82228e36
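The change follows one pattern across the touched files: bounds checks that previously compared a request against the fixed TASK_SIZE now compare it against the per-process limit carried in mm->context.addr_limit. A minimal sketch of that pattern is below; it is illustrative only and not part of the commit, and the helper name addr_range_ok is hypothetical.

/*
 * Illustrative sketch of the pattern applied by this commit; the helper
 * name addr_range_ok does not appear in the diff.
 */
static inline bool addr_range_ok(struct mm_struct *mm,
				 unsigned long addr, unsigned long len)
{
	/* Before: bounded by the compile-time constant TASK_SIZE. */
	/* return len <= TASK_SIZE && addr <= TASK_SIZE - len; */

	/* After: bounded by the limit stored in the mm context. */
	return len <= mm->context.addr_limit &&
	       addr <= mm->context.addr_limit - len;
}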
+2 −2
@@ -52,7 +52,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,

	if (len & ~huge_page_mask(h))
		return -EINVAL;
-	if (len > TASK_SIZE)
+	if (len > mm->context.addr_limit)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
@@ -64,7 +64,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
+		if (mm->context.addr_limit - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
+6 −6
@@ -97,7 +97,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

-	if (len > TASK_SIZE - mmap_min_addr)
+	if (len > mm->context.addr_limit - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
@@ -106,7 +106,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+		if (mm->context.addr_limit - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
@@ -114,7 +114,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
-	info.high_limit = TASK_SIZE;
+	info.high_limit = mm->context.addr_limit;
	info.align_mask = 0;
	return vm_unmapped_area(&info);
}
@@ -132,7 +132,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
-	if (len > TASK_SIZE - mmap_min_addr)
+	if (len > mm->context.addr_limit - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
@@ -142,7 +142,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+		if (mm->context.addr_limit - len >= addr && addr >= mmap_min_addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}
@@ -164,7 +164,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
-		info.high_limit = TASK_SIZE;
+		info.high_limit = mm->context.addr_limit;
		addr = vm_unmapped_area(&info);
	}

+3 −3
@@ -277,7 +277,7 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
-	while (addr < TASK_SIZE) {
+	while (addr < mm->context.addr_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;
@@ -289,8 +289,8 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
-		if (addr >= TASK_SIZE)
-			addr = TASK_SIZE;
+		if (addr >= mm->context.addr_limit)
+			addr = mm->context.addr_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
+2 −1
@@ -197,7 +197,8 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
-	    addr >= TASK_SIZE || len >= TASK_SIZE || addr + len > TASK_SIZE)
+	    addr >= mm->context.addr_limit || len >= mm->context.addr_limit ||
+	    addr + len > mm->context.addr_limit)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))