
Commit cea10a19 authored by Michel Lespinasse, committed by Linus Torvalds

mm: directly use __mlock_vma_pages_range() in find_extend_vma()



In find_extend_vma(), we don't need mlock_vma_pages_range() to verify
the vma type - we know we're working with a stack.  So, we can call
directly into __mlock_vma_pages_range(), and remove the last
make_pages_present() call site.
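
For illustration, the check that becomes unnecessary is the vma-type filter visible in the removed mlock_vma_pages_range() further down. A rough userspace model of that filter (not kernel code -- the VM_* values and the helper name here are made up for the example, and the hugetlb/gate-vma cases are folded into one flag test) looks like this; a stack vma is plain anonymous memory, so it can never hit any of the special cases:

/* Simplified model of the old filter; flag values are illustrative only. */
#include <stdbool.h>
#include <stdio.h>

#define VM_IO         0x00000001UL
#define VM_PFNMAP     0x00000002UL
#define VM_DONTEXPAND 0x00000004UL
#define VM_HUGETLB    0x00000008UL
#define VM_GROWSDOWN  0x00000010UL	/* typical flag on a stack vma */

/* True when the old helper would simply have faulted the pages in,
 * i.e. for ordinary lockable vmas such as an anonymous stack mapping. */
static bool ordinary_lockable_vma(unsigned long vm_flags)
{
	if (vm_flags & (VM_IO | VM_PFNMAP))
		return false;	/* unlockable device / raw PFN mappings */
	if (vm_flags & (VM_DONTEXPAND | VM_HUGETLB))
		return false;	/* "special" vmas handled separately */
	return true;
}

int main(void)
{
	printf("stack vma  -> %d\n", ordinary_lockable_vma(VM_GROWSDOWN));	/* 1 */
	printf("pfnmap vma -> %d\n", ordinary_lockable_vma(VM_PFNMAP));	/* 0 */
	return 0;
}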

Note that we don't use mm_populate() here, so we can't release the
mmap_sem while allocating new stack pages.  This is deemed acceptable,
because the stack vmas grow by a bounded number of pages at a time, and
these are anon pages so we don't have to read from disk to populate
them.
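
A rough userspace model of the locking contract in play (an assumption-laden sketch: a pthread mutex stands in for mmap_sem and fault_in_pages() is a made-up helper, not the kernel's API) shows why passing a NULL nonblocking pointer matters:

/* Sketch of the nonblocking convention inherited from get_user_pages():
 * with a non-NULL pointer the callee may drop mmap_sem and report that by
 * clearing *nonblocking; with NULL it must hold the lock throughout. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mmap_sem = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in fault-in helper: pretends it had to block for I/O. */
static long fault_in_pages(int npages, int *nonblocking)
{
	if (nonblocking) {
		/* Allowed to drop the lock while "waiting for the disk". */
		pthread_mutex_unlock(&mmap_sem);
		*nonblocking = 0;	/* tell the caller the lock was dropped */
	}
	/* With nonblocking == NULL the fault must be serviced without
	 * releasing mmap_sem -- fine for a few anonymous stack pages. */
	return npages;
}

int main(void)
{
	int locked;

	/* find_extend_vma()-style call: lock held across the whole fault-in. */
	pthread_mutex_lock(&mmap_sem);
	fault_in_pages(1, NULL);
	pthread_mutex_unlock(&mmap_sem);

	/* mm_populate()-style call: be prepared for the lock to be dropped. */
	pthread_mutex_lock(&mmap_sem);
	locked = 1;
	fault_in_pages(512, &locked);
	if (!locked)
		pthread_mutex_lock(&mmap_sem);	/* retake before touching the vma */
	pthread_mutex_unlock(&mmap_sem);

	printf("done\n");
	return 0;
}

Passing NULL, as find_extend_vma() does below, keeps the semaphore held for the whole fault-in, which is tolerable here because only a bounded number of anonymous pages are allocated per stack expansion.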

Signed-off-by: Michel Lespinasse <walken@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Tested-by: Andy Lutomirski <luto@amacapital.net>
Cc: Greg Ungerer <gregungerer@westnet.com.au>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c22c0d63
include/linux/mm.h  +0 −1
@@ -1035,7 +1035,6 @@ static inline int fixup_user_fault(struct task_struct *tsk,
}
#endif

-extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write);
mm/internal.h  +2 −2
@@ -162,8 +162,8 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
-extern long mlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end);
+extern long __mlock_vma_pages_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
mm/memory.c  +0 −24
@@ -3824,30 +3824,6 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
}
#endif /* __PAGETABLE_PMD_FOLDED */

-int make_pages_present(unsigned long addr, unsigned long end)
-{
-	int ret, len, write;
-	struct vm_area_struct * vma;
-
-	vma = find_vma(current->mm, addr);
-	if (!vma)
-		return -ENOMEM;
-	/*
-	 * We want to touch writable mappings with a write fault in order
-	 * to break COW, except for shared mappings because these don't COW
-	 * and we would not want to dirty them for nothing.
-	 */
-	write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
-	BUG_ON(addr >= end);
-	BUG_ON(end > vma->vm_end);
-	len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
-	ret = get_user_pages(current, current->mm, addr,
-			len, write, 0, NULL, NULL);
-	if (ret < 0)
-		return ret;
-	return ret == len ? 0 : -EFAULT;
-}
-
#if !defined(__HAVE_ARCH_GATE_AREA)

#if defined(AT_SYSINFO_EHDR)
mm/mlock.c  +3 −54
@@ -155,9 +155,8 @@ void munlock_vma_page(struct page *page)
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
-static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-				    unsigned long start, unsigned long end,
-				    int *nonblocking)
+long __mlock_vma_pages_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
@@ -202,56 +201,6 @@ static int __mlock_posix_error_return(long retval)
	return retval;
}

-/**
- * mlock_vma_pages_range() - mlock pages in specified vma range.
- * @vma - the vma containing the specfied address range
- * @start - starting address in @vma to mlock
- * @end   - end address [+1] in @vma to mlock
- *
- * For mmap()/mremap()/expansion of mlocked vma.
- *
- * return 0 on success for "normal" vmas.
- *
- * return number of pages [> 0] to be removed from locked_vm on success
- * of "special" vmas.
- */
-long mlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end)
-{
-	int nr_pages = (end - start) / PAGE_SIZE;
-	BUG_ON(!(vma->vm_flags & VM_LOCKED));
-
-	/*
-	 * filter unlockable vmas
-	 */
-	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
-		goto no_mlock;
-
-	if (!((vma->vm_flags & VM_DONTEXPAND) ||
-			is_vm_hugetlb_page(vma) ||
-			vma == get_gate_vma(current->mm))) {
-
-		__mlock_vma_pages_range(vma, start, end, NULL);
-
-		/* Hide errors from mmap() and other callers */
-		return 0;
-	}
-
-	/*
-	 * User mapped kernel pages or huge pages:
-	 * make these pages present to populate the ptes, but
-	 * fall thru' to reset VM_LOCKED--no need to unlock, and
-	 * return nr_pages so these don't get counted against task's
-	 * locked limit.  huge pages are already counted against
-	 * locked vm limit.
-	 */
-	make_pages_present(start, end);
-
-no_mlock:
-	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
-	return nr_pages;		/* error or pages NOT mlocked */
-}
-
/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.'
 * @vma - vma containing range to be munlock()ed.
@@ -303,7 +252,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
- * populate the ptes via make_pages_present().
+ * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
mm/mmap.c  +4 −6
@@ -2204,9 +2204,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
		return vma;
	if (!prev || expand_stack(prev, addr))
		return NULL;
-	if (prev->vm_flags & VM_LOCKED) {
-		mlock_vma_pages_range(prev, addr, prev->vm_end);
-	}
+	if (prev->vm_flags & VM_LOCKED)
+		__mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
	return prev;
}
#else
@@ -2232,9 +2231,8 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
	start = vma->vm_start;
	if (expand_stack(vma, addr))
		return NULL;
-	if (vma->vm_flags & VM_LOCKED) {
-		mlock_vma_pages_range(vma, addr, start);
-	}
+	if (vma->vm_flags & VM_LOCKED)
+		__mlock_vma_pages_range(vma, addr, start, NULL);
	return vma;
}
#endif