Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6b895253 authored by Peter Xu, committed by Gerrit - the friendly Code Review server
Browse files

mm/ksm: Remove reuse_ksm_page()



Remove the function, as its last caller went away with the do_wp_page()
changes.

Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Git-commit: 1a0cf26323c80e2f1c58fc04f15686de61bfab0c
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git


Change-Id: Icd11ae747c4b0b9ddd81b8b3c5e463ed88abea9c
Signed-off-by: Zhenhua Huang <zhenhuah@codeaurora.org>
parent f63da7af
Loading
Loading
Loading
Loading
+0 −7
Original line number Diff line number Diff line
@@ -53,8 +53,6 @@ struct page *ksm_might_need_to_copy(struct page *page,

void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
/*
 * Try to reuse a write-protected KSM page in place at a COW fault instead
 * of copying it; returns true on success. Defined in mm/ksm.c.
 */
bool reuse_ksm_page(struct page *page,
			struct vm_area_struct *vma, unsigned long address);

#else  /* !CONFIG_KSM */

@@ -88,11 +86,6 @@ static inline void rmap_walk_ksm(struct page *page,
/* No-op stub for !CONFIG_KSM builds: there are no KSM pages to migrate. */
static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
/*
 * Stub for !CONFIG_KSM builds: with KSM compiled out there are no KSM
 * pages, so an in-place reuse can never succeed.
 */
static inline bool reuse_ksm_page(struct page *page,
				  struct vm_area_struct *vma,
				  unsigned long address)
{
	return false;
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

+0 −25
Original line number Diff line number Diff line
@@ -2660,31 +2660,6 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
		goto again;
}

/*
 * reuse_ksm_page - try to take over a write-protected KSM page in place
 * @page:    KSM page hit by the COW fault; per the DEBUG_VM checks below it
 *           is expected to be locked, mapped, and not the zero page
 * @vma:     the faulting VMA
 * @address: faulting user address within @vma
 *
 * Returns true when @page could be claimed for exclusive use by @vma
 * (the caller may then map it writable), false when the caller must
 * fall back to copying the page.
 */
bool reuse_ksm_page(struct page *page,
		    struct vm_area_struct *vma,
		    unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/* Debug-only sanity checks on the caller's contract (see above). */
	if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
			WARN_ON(!page_mapped(page)) ||
			WARN_ON(!PageLocked(page))) {
		dump_page(page, "reuse_ksm_page");
		return false;
	}
#endif

	/* Not reusable while in the swap cache or once off its stable node. */
	if (PageSwapCache(page) || !page_stable_node(page))
		return false;
	/* Prohibit parallel get_ksm_page() */
	if (!page_ref_freeze(page, 1))
		return false;

	/*
	 * Refcount frozen at 1: we hold the only reference, so it is safe
	 * to point the page's anon rmap and index at @vma before thawing.
	 */
	page_move_anon_rmap(page, vma);
	page->index = linear_page_index(vma, address);
	page_ref_unfreeze(page, 1);

	return true;
}
#ifdef CONFIG_MIGRATION
void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{