
Commit 117b0791 authored by Kirill A. Shutemov, committed by Linus Torvalds

mm, thp: move ptl taking inside page_check_address_pmd()



With the split page table lock we can't know which lock we need to take
before we find the relevant pmd.

Let's move lock taking inside the function.
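
For illustration, a minimal sketch of the resulting calling convention
(the example_caller wrapper below is hypothetical; the flag, the &ptl
out-argument, and the unlock-on-success rule mirror the callers updated
in this patch):

	static int example_caller(struct page *page, struct mm_struct *mm,
				  unsigned long address)
	{
		spinlock_t *ptl;	/* filled in by the callee on success */
		pmd_t *pmd;

		/* The pmd lock is now taken inside page_check_address_pmd(). */
		pmd = page_check_address_pmd(page, mm, address,
				PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
		if (!pmd)
			return 0;	/* page not mapped here; no lock held */

		/* ... operate on the huge pmd while *ptl is held ... */

		spin_unlock(ptl);	/* release the lock taken by the callee */
		return 1;
	}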

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Alex Thorlton <athorlton@sgi.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "Eric W . Biederman" <ebiederm@xmission.com>
Cc: "Paul E . McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Jones <davej@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Sedat Dilek <sedat.dilek@gmail.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bf929152
include/linux/huge_mm.h  +2 −1
@@ -54,7 +54,8 @@ enum page_check_address_pmd_flag {
 extern pmd_t *page_check_address_pmd(struct page *page,
				     struct mm_struct *mm,
				     unsigned long address,
-				     enum page_check_address_pmd_flag flag);
+				     enum page_check_address_pmd_flag flag,
+				     spinlock_t **ptl);
 
 #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
 #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
mm/huge_memory.c  +27 −16
@@ -1552,23 +1552,33 @@ int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
 	return 0;
 }
 
+/*
+ * This function returns whether a given @page is mapped onto the @address
+ * in the virtual space of @mm.
+ *
+ * When it's true, this function returns *pmd with holding the page table lock
+ * and passing it back to the caller via @ptl.
+ * If it's false, returns NULL without holding the page table lock.
+ */
 pmd_t *page_check_address_pmd(struct page *page,
			      struct mm_struct *mm,
			      unsigned long address,
-			      enum page_check_address_pmd_flag flag)
+			      enum page_check_address_pmd_flag flag,
+			      spinlock_t **ptl)
 {
-	pmd_t *pmd, *ret = NULL;
+	pmd_t *pmd;
 
 	if (address & ~HPAGE_PMD_MASK)
-		goto out;
+		return NULL;
 
 	pmd = mm_find_pmd(mm, address);
 	if (!pmd)
-		goto out;
+		return NULL;
+	*ptl = pmd_lock(mm, pmd);
 	if (pmd_none(*pmd))
-		goto out;
+		goto unlock;
 	if (pmd_page(*pmd) != page)
-		goto out;
+		goto unlock;
 	/*
 	 * split_vma() may create temporary aliased mappings. There is
 	 * no risk as long as all huge pmd are found and have their
@@ -1578,14 +1588,15 @@ pmd_t *page_check_address_pmd(struct page *page,
 	 */
 	if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
 	    pmd_trans_splitting(*pmd))
-		goto out;
+		goto unlock;
 	if (pmd_trans_huge(*pmd)) {
 		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
 			  !pmd_trans_splitting(*pmd));
-		ret = pmd;
+		return pmd;
 	}
-out:
-	return ret;
+unlock:
+	spin_unlock(*ptl);
+	return NULL;
 }
 
 static int __split_huge_page_splitting(struct page *page,
@@ -1593,6 +1604,7 @@ static int __split_huge_page_splitting(struct page *page,
				       unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	spinlock_t *ptl;
 	pmd_t *pmd;
 	int ret = 0;
 	/* For mmu_notifiers */
@@ -1600,9 +1612,8 @@ static int __split_huge_page_splitting(struct page *page,
 	const unsigned long mmun_end   = address + HPAGE_PMD_SIZE;
 
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
-	spin_lock(&mm->page_table_lock);
 	pmd = page_check_address_pmd(page, mm, address,
-				     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
+			PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
 	if (pmd) {
 		/*
 		 * We can't temporarily set the pmd to null in order
@@ -1613,8 +1624,8 @@ static int __split_huge_page_splitting(struct page *page,
 		 */
 		pmdp_splitting_flush(vma, address, pmd);
 		ret = 1;
+		spin_unlock(ptl);
 	}
-	spin_unlock(&mm->page_table_lock);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
 	return ret;
@@ -1745,14 +1756,14 @@ static int __split_huge_page_map(struct page *page,
				 unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	spinlock_t *ptl;
 	pmd_t *pmd, _pmd;
 	int ret = 0, i;
 	pgtable_t pgtable;
 	unsigned long haddr;
 
-	spin_lock(&mm->page_table_lock);
 	pmd = page_check_address_pmd(page, mm, address,
-				     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
+			PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl);
 	if (pmd) {
 		pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 		pmd_populate(mm, &_pmd, pgtable);
@@ -1807,8 +1818,8 @@ static int __split_huge_page_map(struct page *page,
 		pmdp_invalidate(vma, address, pmd);
 		pmd_populate(mm, pmd, pgtable);
 		ret = 1;
+		spin_unlock(ptl);
 	}
-	spin_unlock(&mm->page_table_lock);
 
 	return ret;
 }
mm/rmap.c  +5 −8
@@ -665,25 +665,23 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long *vm_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	spinlock_t *ptl;
 	int referenced = 0;
 
 	if (unlikely(PageTransHuge(page))) {
 		pmd_t *pmd;
 
-		spin_lock(&mm->page_table_lock);
 		/*
 		 * rmap might return false positives; we must filter
 		 * these out using page_check_address_pmd().
 		 */
 		pmd = page_check_address_pmd(page, mm, address,
-					     PAGE_CHECK_ADDRESS_PMD_FLAG);
-		if (!pmd) {
-			spin_unlock(&mm->page_table_lock);
+					     PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl);
+		if (!pmd)
			goto out;
-		}
 
 		if (vma->vm_flags & VM_LOCKED) {
-			spin_unlock(&mm->page_table_lock);
+			spin_unlock(ptl);
 			*mapcount = 0;	/* break early from loop */
 			*vm_flags |= VM_LOCKED;
 			goto out;
@@ -692,10 +690,9 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 		/* go ahead even if the pmd is pmd_trans_splitting() */
 		if (pmdp_clear_flush_young_notify(vma, address, pmd))
 			referenced++;
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(ptl);
 	} else {
 		pte_t *pte;
-		spinlock_t *ptl;
 
 		/*
 		 * rmap might return false positives; we must filter