
Commit 84d33df2 authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: rename FOLL_MLOCK to FOLL_POPULATE



After commit a1fde08c ("VM: skip the stack guard page lookup in
get_user_pages only for mlock"), FOLL_MLOCK has lost its original
meaning: we don't necessarily mlock the page when the flag is set --
we also take VM_LOCKED into consideration.

Since we use the same codepath for __mm_populate(), let's rename
FOLL_MLOCK to FOLL_POPULATE.
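
For readers who want to see the two user-visible entry points behind this
shared codepath, a minimal userspace sketch (illustrative only, not part of
the patch): both calls below drive the kernel's populate path, but only the
mlock()ed mapping has VM_LOCKED set, so only its pages end up mlocked.

	/* Illustrative demo, not part of the patch. */
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 16 * 4096;

		/* MAP_POPULATE: pages are faulted in up front (the populate
		 * path), but the VMA is not VM_LOCKED, so nothing is mlocked. */
		char *pop = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
		if (pop == MAP_FAILED) { perror("mmap"); return 1; }

		/* mlock(): the same populate codepath faults the pages in, and
		 * because the VMA is VM_LOCKED they are also marked mlocked
		 * (visible as VmLck in /proc/self/status). */
		char *lck = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (lck == MAP_FAILED || mlock(lck, len)) { perror("mlock"); return 1; }

		printf("populated and locked %zu bytes\n", len);
		return 0;
	}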

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c21a6daf
include/linux/mm.h  +1 −1
@@ -2109,7 +2109,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
 #define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
 				 * and return without waiting upon it */
-#define FOLL_MLOCK	0x40	/* mark page as mlocked */
+#define FOLL_POPULATE	0x40	/* fault in page */
 #define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
 #define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
mm/gup.c  +3 −3
@@ -92,7 +92,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 		 */
 		mark_page_accessed(page);
 	}
-	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+	if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
 		/*
 		 * The preliminary mapping check is mainly to avoid the
 		 * pointless overhead of lock_page on the ZERO_PAGE
@@ -265,8 +265,8 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	unsigned int fault_flags = 0;
 	int ret;
 
-	/* For mlock, just skip the stack guard page. */
-	if ((*flags & FOLL_MLOCK) &&
+	/* For mm_populate(), just skip the stack guard page. */
+	if ((*flags & FOLL_POPULATE) &&
 			(stack_guard_page_start(vma, address) ||
 			 stack_guard_page_end(vma, address + PAGE_SIZE)))
 		return -ENOENT;
mm/huge_memory.c  +1 −1
@@ -1231,7 +1231,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 					  pmd, _pmd,  1))
 			update_mmu_cache_pmd(vma, addr, pmd);
 	}
-	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+	if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
 		if (page->mapping && trylock_page(page)) {
 			lru_add_drain();
 			if (page->mapping)
mm/mlock.c  +1 −1
@@ -237,7 +237,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
 	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
 
-	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
+	gup_flags = FOLL_TOUCH | FOLL_POPULATE;
 	/*
 	 * We want to touch writable mappings with a write fault in order
 	 * to break COW, except for shared mappings because these don't COW