
Commit ff6a6da6 authored by Michel Lespinasse, committed by Linus Torvalds

mm: accelerate munlock() treatment of THP pages



munlock_vma_pages_range() was always incrementing addresses by PAGE_SIZE
at a time.  When munlocking THP pages (or the huge zero page), this
resulted in taking the mm->page_table_lock 512 times in a row.

We can do better by making use of the page_mask returned by
follow_page_mask (for the huge zero page case), or the size of the page
munlock_vma_page() operated on (for the true THP page case).

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c5a51053
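
For illustration (editorial note, not part of the commit): a minimal userspace sketch of the address-stepping arithmetic this change adds to munlock_vma_pages_range(), assuming 4 KB base pages (PAGE_SHIFT = 12) and 2 MB THPs, so that page_mask is 511 for an intact huge page and 0 for an ordinary page.

/*
 * Standalone sketch, not kernel code: shows how
 *   page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask)
 * advances the munlock scan. With page_mask == 0 it steps one base page;
 * with page_mask == 511 it steps to the end of the current 2 MB huge page,
 * so the loop (and mm->page_table_lock) is entered once instead of 512 times.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static unsigned long next_addr(unsigned long start, unsigned int page_mask)
{
	unsigned long page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);

	return start + page_increm * PAGE_SIZE;
}

int main(void)
{
	printf("%#lx\n", next_addr(0x200000, 0));	/* base page:     0x201000 */
	printf("%#lx\n", next_addr(0x200000, 511));	/* aligned THP:   0x400000 */
	printf("%#lx\n", next_addr(0x3ff000, 511));	/* mid-THP start: 0x400000 */
	return 0;
}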
mm/internal.h  +1 −1

@@ -195,7 +195,7 @@ static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
  * must be called with vma's mmap_sem held for read or write, and page locked.
  */
 extern void mlock_vma_page(struct page *page);
-extern void munlock_vma_page(struct page *page);
+extern unsigned int munlock_vma_page(struct page *page);
 
 /*
  * Clear the page's PageMlocked().  This can be useful in a situation where
mm/mlock.c  +23 −11

@@ -102,13 +102,16 @@ void mlock_vma_page(struct page *page)
  * can't isolate the page, we leave it for putback_lru_page() and vmscan
  * [page_referenced()/try_to_unmap()] to deal with.
  */
-void munlock_vma_page(struct page *page)
+unsigned int munlock_vma_page(struct page *page)
 {
+	unsigned int page_mask = 0;
+
 	BUG_ON(!PageLocked(page));
 
 	if (TestClearPageMlocked(page)) {
-		mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    -hpage_nr_pages(page));
+		unsigned int nr_pages = hpage_nr_pages(page);
+		mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+		page_mask = nr_pages - 1;
 		if (!isolate_lru_page(page)) {
 			int ret = SWAP_AGAIN;
 
@@ -141,6 +144,8 @@ void munlock_vma_page(struct page *page)
 				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
 		}
 	}
+
+	return page_mask;
 }
 
 /**
@@ -159,7 +164,6 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end, int *nonblocking)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long addr = start;
 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
 	int gup_flags;
 
@@ -189,7 +193,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	 * We made sure addr is within a VMA, so the following will
 	 * not result in a stack expansion that recurses back here.
 	 */
-	return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
+	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
 				NULL, NULL, nonblocking);
 }
 
@@ -226,13 +230,12 @@ static int __mlock_posix_error_return(long retval)
 void munlock_vma_pages_range(struct vm_area_struct *vma,
 			     unsigned long start, unsigned long end)
 {
-	unsigned long addr;
-
-	lru_add_drain();
 	vma->vm_flags &= ~VM_LOCKED;
 
-	for (addr = start; addr < end; addr += PAGE_SIZE) {
+	while (start < end) {
 		struct page *page;
+		unsigned int page_mask, page_increm;
+
 		/*
 		 * Although FOLL_DUMP is intended for get_dump_page(),
 		 * it just so happens that its special treatment of the
@@ -240,13 +243,22 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 		 * suits munlock very well (and if somehow an abnormal page
 		 * has sneaked into the range, we won't oops here: great).
 		 */
-		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
+		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
+					&page_mask);
 		if (page && !IS_ERR(page)) {
 			lock_page(page);
-			munlock_vma_page(page);
+			lru_add_drain();
+			/*
+			 * Any THP page found by follow_page_mask() may have
+			 * gotten split before reaching munlock_vma_page(),
+			 * so we need to recompute the page_mask here.
+			 */
+			page_mask = munlock_vma_page(page);
 			unlock_page(page);
 			put_page(page);
 		}
+		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+		start += page_increm * PAGE_SIZE;
 		cond_resched();
 	}
 }
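
A closing editorial note, not part of the commit: the comment added in the last hunk matters because the page_mask reported by follow_page_mask() describes the mapping as seen before the page lock is taken. Below is a hedged sketch of the return convention munlock_vma_page() now follows, under the same assumed 4 KB / 2 MB geometry; fake_hpage_nr_pages() is a hypothetical stand-in for the kernel's hpage_nr_pages().

#include <stdio.h>

/* Hypothetical stand-in for hpage_nr_pages(): 512 while the THP is still
 * intact under the page lock, 1 once it has been split into base pages. */
static unsigned int fake_hpage_nr_pages(int still_huge)
{
	return still_huge ? 512 : 1;
}

/* Mirrors the new convention: munlock_vma_page() reports nr_pages - 1,
 * i.e. 511 for an intact 2 MB THP and 0 otherwise, so a concurrently
 * split THP makes the caller fall back to 4 KB steps. */
static unsigned int reported_page_mask(int still_huge)
{
	return fake_hpage_nr_pages(still_huge) - 1;
}

int main(void)
{
	printf("intact THP: page_mask = %u\n", reported_page_mask(1));	/* 511 */
	printf("split THP:  page_mask = %u\n", reported_page_mask(0));	/* 0 */
	return 0;
}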