
Commit 3fe87967 authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: convert remove_migration_pte() to use page_vma_mapped_walk()

remove_migration_pte() can also easily be converted to
page_vma_mapped_walk().

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/20170129173858.45174-13-kirill.shutemov@linux.intel.com
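
For context, page_vma_mapped_walk() replaces the open-coded page-table lookup
with an iterator: the caller fills a struct page_vma_mapped_walk with the page,
vma, address and flags (PVMW_SYNC | PVMW_MIGRATION here), and each successful
iteration returns with the page-table lock held and pvmw.pte / pvmw.address
pointing at the next mapping of the page. Below is a minimal sketch of that
calling pattern; the helper count_migration_ptes() is hypothetical and only
illustrates the loop, it is not part of this commit.

#include <linux/mm.h>
#include <linux/rmap.h>		/* struct page_vma_mapped_walk, page_vma_mapped_walk() */
#include <linux/swapops.h>	/* pte_to_swp_entry(), is_migration_entry() */

/* Hypothetical helper: count migration entries still installed for @page in @vma. */
static int count_migration_ptes(struct page *page, struct vm_area_struct *vma,
				unsigned long addr)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = addr,
		.flags = PVMW_SYNC | PVMW_MIGRATION,
	};
	int nr = 0;

	/* The PTE lock is held inside the loop; pvmw.pte and pvmw.address are valid here. */
	while (page_vma_mapped_walk(&pvmw)) {
		swp_entry_t entry = pte_to_swp_entry(*pvmw.pte);

		if (is_migration_entry(entry))
			nr++;
	}

	return nr;
}

Because PVMW_MIGRATION makes the walk match only migration entries that point
at the given page, the converted remove_migration_pte() below can drop its
explicit is_swap_pte()/is_migration_entry() filtering and the manual pmd/pte
lookup.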


Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d53a8b49
mm/migrate.c +41 −61
@@ -193,52 +193,33 @@ void putback_movable_pages(struct list_head *l)
 /*
  * Restore a potential migration pte to a working pte entry
  */
-static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+static int remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 				 unsigned long addr, void *old)
 {
-	struct mm_struct *mm = vma->vm_mm;
+	struct page_vma_mapped_walk pvmw = {
+		.page = old,
+		.vma = vma,
+		.address = addr,
+		.flags = PVMW_SYNC | PVMW_MIGRATION,
+	};
+	struct page *new;
+	pte_t pte;
 	swp_entry_t entry;
- 	pmd_t *pmd;
-	pte_t *ptep, pte;
- 	spinlock_t *ptl;
 
-	if (unlikely(PageHuge(new))) {
-		ptep = huge_pte_offset(mm, addr);
-		if (!ptep)
-			goto out;
-		ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
-	} else {
-		pmd = mm_find_pmd(mm, addr);
-		if (!pmd)
-			goto out;
-
-		ptep = pte_offset_map(pmd, addr);
-
-		/*
-		 * Peek to check is_swap_pte() before taking ptlock?  No, we
-		 * can race mremap's move_ptes(), which skips anon_vma lock.
-		 */
-
-		ptl = pte_lockptr(mm, pmd);
-	}
-
- 	spin_lock(ptl);
-	pte = *ptep;
-	if (!is_swap_pte(pte))
-		goto unlock;
-
-	entry = pte_to_swp_entry(pte);
-
-	if (!is_migration_entry(entry) ||
-	    migration_entry_to_page(entry) != old)
-		goto unlock;
+	VM_BUG_ON_PAGE(PageTail(page), page);
+	while (page_vma_mapped_walk(&pvmw)) {
+		new = page - pvmw.page->index +
+			linear_page_index(vma, pvmw.address);
 
 		get_page(new);
 		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
-	if (pte_swp_soft_dirty(*ptep))
+		if (pte_swp_soft_dirty(*pvmw.pte))
 			pte = pte_mksoft_dirty(pte);
 
-	/* Recheck VMA as permissions can change since migration started  */
+		/*
+		 * Recheck VMA as permissions can change since migration started
+		 */
+		entry = pte_to_swp_entry(*pvmw.pte);
 		if (is_write_migration_entry(entry))
 			pte = maybe_mkwrite(pte, vma);

@@ -249,15 +230,15 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 		}
 #endif
 		flush_dcache_page(new);
-	set_pte_at(mm, addr, ptep, pte);
+		set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
 
 		if (PageHuge(new)) {
 			if (PageAnon(new))
-			hugepage_add_anon_rmap(new, vma, addr);
+				hugepage_add_anon_rmap(new, vma, pvmw.address);
 			else
 				page_dup_rmap(new, true);
 		} else if (PageAnon(new))
-		page_add_anon_rmap(new, vma, addr, false);
+			page_add_anon_rmap(new, vma, pvmw.address, false);
 		else
 			page_add_file_rmap(new, false);

@@ -265,10 +246,9 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 			mlock_vma_page(new);
 
 		/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, addr, ptep);
-unlock:
-	pte_unmap_unlock(ptep, ptl);
-out:
+		update_mmu_cache(vma, pvmw.address, pvmw.pte);
+	}
 
 	return SWAP_AGAIN;
 }