
Commit 8749cfea authored by Vladimir Davydov, committed by Linus Torvalds

mm: add page_check_address_transhuge() helper



page_referenced_one() and page_idle_clear_pte_refs_one() duplicate the
code for looking up pte of a (possibly transhuge) page.  Move this code
to a new helper function, page_check_address_transhuge(), and make the
above mentioned functions use it.

This is just a cleanup; no functional changes are intended.
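
The resulting calling convention, as a minimal illustrative sketch distilled from the two call-site hunks below (not part of the patch itself; both real call sites additionally guard the pmd branch with IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) and WARN_ON_ONCE() on an unexpected pmd-mapped page):

	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	/*
	 * On success the helper returns true with the page table lock in
	 * *ptl held; pte is NULL when the page is mapped by a huge pmd.
	 */
	if (!page_check_address_transhuge(page, mm, addr, &pmd, &pte, &ptl))
		return SWAP_AGAIN;	/* page is not mapped at this address */

	if (pte) {
		/* PTE-mapped, possibly a subpage of a THP: act on the pte */
		referenced = ptep_clear_young_notify(vma, addr, pte);
		pte_unmap(pte);		/* unmap the pte but keep ptl held */
	} else {
		/* PMD-mapped transparent huge page: act on the pmd */
		referenced = pmdp_clear_young_notify(vma, addr, pmd);
	}
	spin_unlock(ptl);		/* the caller drops the lock in both cases */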

Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d9654322
include/linux/rmap.h +19 −0
@@ -215,6 +215,25 @@ static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
 	return ptep;
 }
 
+/*
+ * Used by idle page tracking to check if a page was referenced via page
+ * tables.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
+				  unsigned long address, pmd_t **pmdp,
+				  pte_t **ptep, spinlock_t **ptlp);
+#else
+static inline bool page_check_address_transhuge(struct page *page,
+				struct mm_struct *mm, unsigned long address,
+				pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp)
+{
+	*ptep = page_check_address(page, mm, address, ptlp, 0);
+	*pmdp = NULL;
+	return !!*ptep;
+}
+#endif
+
 /*
  * Used by swapoff to help locate where page is expected in vma.
  */
mm/page_idle.c +9 −54
@@ -55,71 +55,26 @@ static int page_idle_clear_pte_refs_one(struct page *page,
 					unsigned long addr, void *arg)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	spinlock_t *ptl;
-	pgd_t *pgd;
-	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
+	spinlock_t *ptl;
 	bool referenced = false;
 
-	pgd = pgd_offset(mm, addr);
-	if (!pgd_present(*pgd))
+	if (!page_check_address_transhuge(page, mm, addr, &pmd, &pte, &ptl))
 		return SWAP_AGAIN;
-	pud = pud_offset(pgd, addr);
-	if (!pud_present(*pud))
-		return SWAP_AGAIN;
-	pmd = pmd_offset(pud, addr);
-
-	if (pmd_trans_huge(*pmd)) {
-		ptl = pmd_lock(mm, pmd);
-		if (!pmd_present(*pmd))
-			goto unlock_pmd;
-		if (unlikely(!pmd_trans_huge(*pmd))) {
-			spin_unlock(ptl);
-			goto map_pte;
-		}
-
-		if (pmd_page(*pmd) != page)
-			goto unlock_pmd;
 
+	if (pte) {
+		referenced = ptep_clear_young_notify(vma, addr, pte);
+		pte_unmap(pte);
+	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
 		referenced = pmdp_clear_young_notify(vma, addr, pmd);
-		spin_unlock(ptl);
-		goto found;
-unlock_pmd:
-		spin_unlock(ptl);
-		return SWAP_AGAIN;
 	} else {
-		pmd_t pmde = *pmd;
-
-		barrier();
-		if (!pmd_present(pmde) || pmd_trans_huge(pmde))
-			return SWAP_AGAIN;
 
+		/* unexpected pmd-mapped page? */
+		WARN_ON_ONCE(1);
 	}
-map_pte:
-	pte = pte_offset_map(pmd, addr);
-	if (!pte_present(*pte)) {
-		pte_unmap(pte);
-		return SWAP_AGAIN;
-	}
-
-	ptl = pte_lockptr(mm, pmd);
-	spin_lock(ptl);
-
-	if (!pte_present(*pte)) {
-		pte_unmap_unlock(pte, ptl);
-		return SWAP_AGAIN;
-	}
-
-	/* THP can be referenced by any subpage */
-	if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) {
-		pte_unmap_unlock(pte, ptl);
-		return SWAP_AGAIN;
-	}
+	spin_unlock(ptl);
 
-	referenced = ptep_clear_young_notify(vma, addr, pte);
-	pte_unmap_unlock(pte, ptl);
-found:
 	if (referenced) {
 		clear_page_idle(page);
 		/*
mm/rmap.c +71 −44
@@ -798,48 +798,44 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 	return 1;
 }
 
-struct page_referenced_arg {
-	int mapcount;
-	int referenced;
-	unsigned long vm_flags;
-	struct mem_cgroup *memcg;
-};
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /*
- * arg: page_referenced_arg will be passed
+ * Check that @page is mapped at @address into @mm. In contrast to
+ * page_check_address(), this function can handle transparent huge pages.
+ *
+ * On success returns true with pte mapped and locked. For PMD-mapped
+ * transparent huge pages *@ptep is set to NULL.
  */
-static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
-			unsigned long address, void *arg)
+bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
+				  unsigned long address, pmd_t **pmdp,
+				  pte_t **ptep, spinlock_t **ptlp)
 {
-	struct mm_struct *mm = vma->vm_mm;
-	spinlock_t *ptl;
-	int referenced = 0;
-	struct page_referenced_arg *pra = arg;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
+	spinlock_t *ptl;
 
 	if (unlikely(PageHuge(page))) {
 		/* when pud is not present, pte will be NULL */
 		pte = huge_pte_offset(mm, address);
 		if (!pte)
-			return SWAP_AGAIN;
+			return false;
 
 		ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
+		pmd = NULL;
 		goto check_pte;
 	}
 
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
-		return SWAP_AGAIN;
+		return false;
 	pud = pud_offset(pgd, address);
 	if (!pud_present(*pud))
-		return SWAP_AGAIN;
+		return false;
 	pmd = pmd_offset(pud, address);
 
 	if (pmd_trans_huge(*pmd)) {
-		int ret = SWAP_AGAIN;
-
 		ptl = pmd_lock(mm, pmd);
 		if (!pmd_present(*pmd))
 			goto unlock_pmd;
@@ -851,31 +847,23 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 		if (pmd_page(*pmd) != page)
 			goto unlock_pmd;
 
-		if (vma->vm_flags & VM_LOCKED) {
-			pra->vm_flags |= VM_LOCKED;
-			ret = SWAP_FAIL; /* To break the loop */
-			goto unlock_pmd;
-		}
-
-		if (pmdp_clear_flush_young_notify(vma, address, pmd))
-			referenced++;
-		spin_unlock(ptl);
+		pte = NULL;
 		goto found;
 unlock_pmd:
 		spin_unlock(ptl);
-		return ret;
+		return false;
 	} else {
 		pmd_t pmde = *pmd;
 
 		barrier();
 		if (!pmd_present(pmde) || pmd_trans_huge(pmde))
-			return SWAP_AGAIN;
+			return false;
 	}
 map_pte:
 	pte = pte_offset_map(pmd, address);
 	if (!pte_present(*pte)) {
 		pte_unmap(pte);
-		return SWAP_AGAIN;
+		return false;
 	}
 
 	ptl = pte_lockptr(mm, pmd);
@@ -884,21 +872,53 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,

 	if (!pte_present(*pte)) {
 		pte_unmap_unlock(pte, ptl);
-		return SWAP_AGAIN;
+		return false;
 	}
 
 	/* THP can be referenced by any subpage */
 	if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) {
 		pte_unmap_unlock(pte, ptl);
-		return SWAP_AGAIN;
+		return false;
 	}
+found:
+	*ptep = pte;
+	*pmdp = pmd;
+	*ptlp = ptl;
+	return true;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+struct page_referenced_arg {
+	int mapcount;
+	int referenced;
+	unsigned long vm_flags;
+	struct mem_cgroup *memcg;
+};
+/*
+ * arg: page_referenced_arg will be passed
+ */
+static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
+			unsigned long address, void *arg)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	struct page_referenced_arg *pra = arg;
+	pmd_t *pmd;
+	pte_t *pte;
+	spinlock_t *ptl;
+	int referenced = 0;
+
+	if (!page_check_address_transhuge(page, mm, address, &pmd, &pte, &ptl))
+		return SWAP_AGAIN;
 
 	if (vma->vm_flags & VM_LOCKED) {
-		pte_unmap_unlock(pte, ptl);
+		if (pte)
+			pte_unmap(pte);
+		spin_unlock(ptl);
 		pra->vm_flags |= VM_LOCKED;
 		return SWAP_FAIL; /* To break the loop */
 	}
 
+	if (pte) {
 		if (ptep_clear_flush_young_notify(vma, address, pte)) {
 			/*
 			 * Don't treat a reference through a sequentially read
@@ -910,9 +930,16 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 			if (likely(!(vma->vm_flags & VM_SEQ_READ)))
 				referenced++;
 		}
-	pte_unmap_unlock(pte, ptl);
+		pte_unmap(pte);
+	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+		if (pmdp_clear_flush_young_notify(vma, address, pmd))
+			referenced++;
+	} else {
+		/* unexpected pmd-mapped page? */
+		WARN_ON_ONCE(1);
+	}
+	spin_unlock(ptl);
 
-found:
 	if (referenced)
 		clear_page_idle(page);
 	if (test_and_clear_page_young(page))