
Commit 451b9514 authored by Kirill Tkhai, committed by Linus Torvalds

mm: remove __hugepage_set_anon_rmap()

This function has been identical to __page_set_anon_rmap() ever since it
was introduced (8 years ago).  Remove it, and make its users call
__page_set_anon_rmap() instead.
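
For reference, the surviving helper carries exactly the body being deleted
below; this is a sketch of __page_set_anon_rmap() as it stands in mm/rmap.c
at this point, with annotations paraphrased here rather than quoted from the
file:

static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	/* Already rmapped as anonymous: nothing to redo. */
	if (PageAnon(page))
		return;

	/* A non-exclusive page must hang off the oldest (root) anon_vma. */
	if (!exclusive)
		anon_vma = anon_vma->root;

	/* Tag the anon_vma pointer and stash it in page->mapping. */
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}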

Link: http://lkml.kernel.org/r/154504875359.30235.6237926369392564851.stgit@localhost.localdomain
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4918e762
mm/rmap.c +4 −21
@@ -1019,7 +1019,7 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 
 /**
  * __page_set_anon_rmap - set up new anonymous rmap
- * @page:	Page to add to rmap
+ * @page:	Page or Hugepage to add to rmap
  * @vma:	VM area to add page to.
  * @address:	User virtual address of the mapping
  * @exclusive:	the page is exclusively owned by the current process
@@ -1916,27 +1916,10 @@ void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
 
 #ifdef CONFIG_HUGETLB_PAGE
 /*
- * The following three functions are for anonymous (private mapped) hugepages.
+ * The following two functions are for anonymous (private mapped) hugepages.
  * Unlike common anonymous pages, anonymous hugepages have no accounting code
  * and no lru code, because we handle hugepages differently from common pages.
  */
-static void __hugepage_set_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address, int exclusive)
-{
-	struct anon_vma *anon_vma = vma->anon_vma;
-
-	BUG_ON(!anon_vma);
-
-	if (PageAnon(page))
-		return;
-	if (!exclusive)
-		anon_vma = anon_vma->root;
-
-	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
-	page->mapping = (struct address_space *) anon_vma;
-	page->index = linear_page_index(vma, address);
-}
-
 void hugepage_add_anon_rmap(struct page *page,
 			    struct vm_area_struct *vma, unsigned long address)
 {
@@ -1948,7 +1931,7 @@ void hugepage_add_anon_rmap(struct page *page,
 	/* address might be in next vma when migration races vma_adjust */
 	first = atomic_inc_and_test(compound_mapcount_ptr(page));
 	if (first)
-		__hugepage_set_anon_rmap(page, vma, address, 0);
+		__page_set_anon_rmap(page, vma, address, 0);
 }
 
 void hugepage_add_new_anon_rmap(struct page *page,
@@ -1956,6 +1939,6 @@ void hugepage_add_new_anon_rmap(struct page *page,
 {
 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(compound_mapcount_ptr(page), 0);
-	__hugepage_set_anon_rmap(page, vma, address, 1);
+	__page_set_anon_rmap(page, vma, address, 1);
 }
 #endif /* CONFIG_HUGETLB_PAGE */
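
The trick the surviving helper relies on is pointer tagging: page->mapping
normally points at an address_space, but for anonymous pages it holds an
anon_vma pointer with the low bit (PAGE_MAPPING_ANON) set, which is what
PageAnon() tests.  A minimal user-space sketch of that encoding, using
illustrative stand-in types rather than the kernel's own:

#include <stdio.h>
#include <stdint.h>

#define PAGE_MAPPING_ANON 0x1UL

struct anon_vma { int placeholder; };
struct address_space { int placeholder; };

struct page {
	struct address_space *mapping;	/* possibly tagged pointer */
	unsigned long index;
};

/* Mirrors PageAnon(): is the tag bit set in page->mapping? */
static int page_is_anon(const struct page *page)
{
	return ((uintptr_t)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/* Mirrors page_anon_vma(): strip the tag to recover the anon_vma. */
static struct anon_vma *page_to_anon_vma(const struct page *page)
{
	uintptr_t m = (uintptr_t)page->mapping;

	return (m & PAGE_MAPPING_ANON) ?
		(struct anon_vma *)(m & ~PAGE_MAPPING_ANON) : NULL;
}

int main(void)
{
	static struct anon_vma av;	/* word-aligned, so bit 0 is free */
	struct page page = { 0 };

	/* What __page_set_anon_rmap() does: tag and store the pointer. */
	page.mapping = (struct address_space *)
			((uintptr_t)&av | PAGE_MAPPING_ANON);

	printf("anon: %d, recovered: %p, original: %p\n",
	       page_is_anon(&page),
	       (void *)page_to_anon_vma(&page), (void *)&av);
	return 0;
}

Because anon_vma structures are word-aligned, bit 0 of the pointer is always
zero and can carry the anon flag for free; that is why both hugepage add
paths only need to install the tagged mapping once, when the first
compound-mapcount reference appears.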