
Commit 7ee07a44 authored by Jianyu Zhan, committed by Linus Torvalds

mm: fold mlocked_vma_newpage() into its only call site



In a previous commit ("mm: use the light version __mod_zone_page_state in
mlocked_vma_newpage()"), an irq-unsafe __mod_zone_page_state() was used.
As suggested by Andrew, to reduce the risk of new call sites using
mlocked_vma_newpage() without realizing they would be adding a race, this
patch folds mlocked_vma_newpage() into its only call site,
page_add_new_anon_rmap(), making the logic open-coded so people can see
what is going on.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Jianyu Zhan <nasa4836@gmail.com>
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Suggested-by: Hugh Dickins <hughd@google.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bea04b07
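
The message's point about the irq-unsafe helper rests on how the two
counter-update variants differ.  As a rough sketch (paraphrasing the
mm/vmstat.c of this kernel era, not quoting it verbatim), the irq-safe
mod_zone_page_state() is simply the __ variant wrapped in an interrupt
disable/enable pair:

/*
 * Rough sketch of the irq-safe wrapper, paraphrased from mm/vmstat.c of
 * this era; the only extra work over __mod_zone_page_state() is the
 * local_irq_save()/local_irq_restore() pair.
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);	/* unprotected update */
	local_irq_restore(flags);
}

Skipping that pair is what makes the __ variant cheaper, and it is only
safe when the counter is never modified from interrupt context and the
caller cannot be preempted mid-update; here the pte lock (a spinlock)
already guarantees the latter.  Open-coding the check at the single call
site keeps that assumption visible to anyone who might later add another
caller.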
+0 −29
@@ -188,31 +188,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
 }
 
-/*
- * Called only in fault path, to determine if a new page is being
- * mapped into a LOCKED vma.  If it is, mark page as mlocked.
- */
-static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
-				    struct page *page)
-{
-	VM_BUG_ON_PAGE(PageLRU(page), page);
-
-	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
-		return 0;
-
-	if (!TestSetPageMlocked(page)) {
-		/*
-		 * We use the irq-unsafe __mod_zone_page_stat because this
-		 * counter is not modified from interrupt context, and the pte
-		 * lock is held(spinlock), which implies preemption disabled.
-		 */
-		__mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    hpage_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
-	}
-	return 1;
-}
-
 /*
  * must be called with vma's mmap_sem held for read or write, and page locked.
  */
@@ -255,10 +230,6 @@ extern unsigned long vma_address(struct page *page,
 				 struct vm_area_struct *vma);
 #endif
 #else /* !CONFIG_MMU */
-static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
-{
-	return 0;
-}
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
+17 −3
@@ -1032,10 +1032,24 @@ void page_add_new_anon_rmap(struct page *page,
 	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
 			hpage_nr_pages(page));
 	__page_set_anon_rmap(page, vma, address, 1);
-	if (!mlocked_vma_newpage(vma, page)) {
+
+	VM_BUG_ON_PAGE(PageLRU(page), page);
+	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
 		SetPageActive(page);
 		lru_cache_add(page);
-	} else
-		add_page_to_unevictable_list(page);
+		return;
+	}
+
+	if (!TestSetPageMlocked(page)) {
+		/*
+		 * We use the irq-unsafe __mod_zone_page_stat because this
+		 * counter is not modified from interrupt context, and the pte
+		 * lock is held(spinlock), which implies preemption disabled.
+		 */
+		__mod_zone_page_state(page_zone(page), NR_MLOCK,
+				    hpage_nr_pages(page));
+		count_vm_event(UNEVICTABLE_PGMLOCKED);
+	}
+	add_page_to_unevictable_list(page);
 }