Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 56f31801 authored by Hugh Dickins, committed by Linus Torvalds
Browse files

mm: cleanup "swapcache" in do_swap_page



I dislike the way in which "swapcache" gets used in do_swap_page():
there is always a page from swapcache there (even if maybe uncached by
the time we lock it), but tests are made according to "swapcache".
Rework that with "page != swapcache", as has been done in unuse_pte().

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Petr Holasek <pholasek@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9e16b7fb
Loading
Loading
Loading
Loading
+8 −10
Original line number Original line Diff line number Diff line
@@ -2954,7 +2954,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned int flags, pte_t orig_pte)
		unsigned int flags, pte_t orig_pte)
{
{
	spinlock_t *ptl;
	spinlock_t *ptl;
	struct page *page, *swapcache = NULL;
	struct page *page, *swapcache;
	swp_entry_t entry;
	swp_entry_t entry;
	pte_t pte;
	pte_t pte;
	int locked;
	int locked;
@@ -3005,9 +3005,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
		 */
		 */
		ret = VM_FAULT_HWPOISON;
		ret = VM_FAULT_HWPOISON;
		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
		swapcache = page;
		goto out_release;
		goto out_release;
	}
	}


	swapcache = page;
	locked = lock_page_or_retry(page, mm, flags);
	locked = lock_page_or_retry(page, mm, flags);


	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
@@ -3025,16 +3027,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
	if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
	if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
		goto out_page;
		goto out_page;


	swapcache = page;
	page = ksm_might_need_to_copy(page, vma, address);
	page = ksm_might_need_to_copy(page, vma, address);
	if (unlikely(!page)) {
	if (unlikely(!page)) {
		ret = VM_FAULT_OOM;
		ret = VM_FAULT_OOM;
		page = swapcache;
		page = swapcache;
		swapcache = NULL;
		goto out_page;
		goto out_page;
	}
	}
	if (page == swapcache)
		swapcache = NULL;


	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
		ret = VM_FAULT_OOM;
		ret = VM_FAULT_OOM;
@@ -3078,10 +3076,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
	}
	}
	flush_icache_page(vma, page);
	flush_icache_page(vma, page);
	set_pte_at(mm, address, page_table, pte);
	set_pte_at(mm, address, page_table, pte);
	if (swapcache) /* ksm created a completely new copy */
	if (page == swapcache)
		page_add_new_anon_rmap(page, vma, address);
	else
		do_page_add_anon_rmap(page, vma, address, exclusive);
		do_page_add_anon_rmap(page, vma, address, exclusive);
	else /* ksm created a completely new copy */
		page_add_new_anon_rmap(page, vma, address);
	/* It's better to call commit-charge after rmap is established */
	/* It's better to call commit-charge after rmap is established */
	mem_cgroup_commit_charge_swapin(page, ptr);
	mem_cgroup_commit_charge_swapin(page, ptr);


@@ -3089,7 +3087,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
		try_to_free_swap(page);
		try_to_free_swap(page);
	unlock_page(page);
	unlock_page(page);
	if (swapcache) {
	if (page != swapcache) {
		/*
		/*
		 * Hold the lock to avoid the swap entry to be reused
		 * Hold the lock to avoid the swap entry to be reused
		 * until we take the PT lock for the pte_same() check
		 * until we take the PT lock for the pte_same() check
@@ -3122,7 +3120,7 @@ out_page:
	unlock_page(page);
	unlock_page(page);
out_release:
out_release:
	page_cache_release(page);
	page_cache_release(page);
	if (swapcache) {
	if (page != swapcache) {
		unlock_page(swapcache);
		unlock_page(swapcache);
		page_cache_release(swapcache);
		page_cache_release(swapcache);
	}
	}