Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9617d95e authored by Nick Piggin, committed by Linus Torvalds
Browse files

[PATCH] mm: rmap optimisation



Optimise rmap functions by minimising atomic operations when we know there
will be no concurrent modifications.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 224abf92
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -324,7 +324,7 @@ void install_arg_page(struct vm_area_struct *vma,
	lru_cache_add_active(page);
	set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
					page, vma->vm_page_prot))));
	page_add_anon_rmap(page, vma, address);
	page_add_new_anon_rmap(page, vma, address);
	pte_unmap_unlock(pte, ptl);

	/* no need for flush_tlb */
+1 −0
Original line number Diff line number Diff line
@@ -71,6 +71,7 @@ void __anon_vma_link(struct vm_area_struct *);
 * rmap interfaces called when adding or removing pte of page
 */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

+3 −3
Original line number Diff line number Diff line
@@ -1498,7 +1498,7 @@ gotten:
		update_mmu_cache(vma, address, entry);
		lazy_mmu_prot_update(entry);
		lru_cache_add_active(new_page);
		page_add_anon_rmap(new_page, vma, address);
		page_add_new_anon_rmap(new_page, vma, address);

		/* Free the old page.. */
		new_page = old_page;
@@ -1978,7 +1978,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
		inc_mm_counter(mm, anon_rss);
		lru_cache_add_active(page);
		SetPageReferenced(page);
		page_add_anon_rmap(page, vma, address);
		page_add_new_anon_rmap(page, vma, address);
	} else {
		/* Map the ZERO_PAGE - vm_page_prot is readonly */
		page = ZERO_PAGE(address);
@@ -2109,7 +2109,7 @@ retry:
		if (anon) {
			inc_mm_counter(mm, anon_rss);
			lru_cache_add_active(new_page);
			page_add_anon_rmap(new_page, vma, address);
			page_add_new_anon_rmap(new_page, vma, address);
		} else {
			inc_mm_counter(mm, file_rss);
			page_add_file_rmap(new_page);
+38 −11
Original line number Diff line number Diff line
@@ -435,17 +435,14 @@ int page_referenced(struct page *page, int is_locked)
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * page_set_anon_rmap - setup new anonymous rmap
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	if (atomic_inc_and_test(&page->_mapcount)) {
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
@@ -456,9 +453,39 @@ void page_add_anon_rmap(struct page *page,

	inc_page_state(nr_mapped);
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	/*
	 * _mapcount starts at -1; the transition -1 -> 0 means this is
	 * the first mapping, so set up the anon rmap fields.  On any
	 * later mapping the fields are already set; re-checking page
	 * index and mapping here would be racy, so do nothing.
	 */
	if (!atomic_inc_and_test(&page->_mapcount))
		return;
	__page_set_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed: since no one else can
 * see the page yet, the non-atomic atomic_set below is safe under the
 * same pte-lock rule as page_add_anon_rmap.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to