
Commit 4d942466 authored by Mel Gorman, committed by Linus Torvalds

mm: convert p[te|md]_mknonnuma and remaining page table manipulations



With PROT_NONE, the traditional page table manipulation functions are
sufficient.

[andre.przywara@arm.com: fix compiler warning in pmdp_invalidate()]
[akpm@linux-foundation.org: fix build with STRICT_MM_TYPECHECKS]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Jones <davej@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kirill Shutemov <kirill.shutemov@linux.intel.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 842915f5
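
For readers skimming the diff, the idea it relies on can be modelled in a few lines of plain C. This is an illustrative userspace sketch only, not kernel code: the pte_t, PTE_* flags and toy pte_modify() below are invented for the example. Once NUMA hinting entries are just PROT_NONE mappings, arming a hinting fault means rewriting the entry with empty protections, and clearing the fault means rewriting it with the VMA's normal vm_page_prot, so no dedicated pte_mknuma()/pte_mknonnuma() helpers are needed.

/*
 * Illustrative userspace model only -- not kernel code.  The names below
 * are toy stand-ins for the PROT_NONE-based NUMA hinting scheme.
 */
#include <stdio.h>
#include <stdint.h>

#define PTE_PRESENT   (1u << 0)
#define PTE_WRITE     (1u << 1)
#define PTE_PROT_MASK (PTE_PRESENT | PTE_WRITE)

typedef uint32_t pte_t;

/* Replace only the protection bits; keep the rest of the entry (the pfn). */
static pte_t pte_modify(pte_t pte, uint32_t newprot)
{
	return (pte & ~PTE_PROT_MASK) | newprot;
}

int main(void)
{
	uint32_t vm_page_prot = PTE_PRESENT | PTE_WRITE; /* the VMA's normal prot */
	pte_t pte = 0xabcd0000u | vm_page_prot;          /* high bits model the pfn */

	/* change_prot_numa(): arm a hinting fault by making the entry inaccessible */
	pte = pte_modify(pte, 0);
	printf("armed:   pte=%#x present=%u\n", (unsigned)pte, (unsigned)(pte & PTE_PRESENT));

	/* do_numa_page()/do_huge_pmd_numa_page(): make it present again */
	pte = pte_modify(pte, vm_page_prot);
	printf("cleared: pte=%#x present=%u\n", (unsigned)pte, (unsigned)(pte & PTE_PRESENT));

	return 0;
}
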
+4 −1
@@ -257,7 +257,10 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
 #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
 
 /* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
-#define pmd_mknotpresent(pmd)	(__pmd(0))
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+	return __pmd(0);
+}
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
+1 −2
@@ -31,8 +31,7 @@ extern int move_huge_pmd(struct vm_area_struct *vma,
 			 unsigned long new_addr, unsigned long old_end,
 			 pmd_t *old_pmd, pmd_t *new_pmd);
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-			unsigned long addr, pgprot_t newprot,
-			int prot_numa);
+			unsigned long addr, pgprot_t newprot);
 
 enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FLAG,
+7 −26
@@ -1355,9 +1355,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	goto out;
 clear_pmdnuma:
 	BUG_ON(!PageLocked(page));
-	pmd = pmd_mknonnuma(pmd);
+	pmd = pmd_modify(pmd, vma->vm_page_prot);
 	set_pmd_at(mm, haddr, pmdp, pmd);
-	VM_BUG_ON(pmd_protnone(*pmdp));
 	update_mmu_cache_pmd(vma, addr, pmdp);
 	unlock_page(page);
 out_unlock:
@@ -1472,7 +1471,7 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
  *  - HPAGE_PMD_NR is protections changed and TLB flush necessary
  */
 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long addr, pgprot_t newprot, int prot_numa)
+		unsigned long addr, pgprot_t newprot)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	spinlock_t *ptl;
@@ -1481,29 +1480,11 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		pmd_t entry;
 		ret = 1;
-		if (!prot_numa) {
-			entry = pmdp_get_and_clear_notify(mm, addr, pmd);
-			if (pmd_protnone(entry))
-				entry = pmd_mknonnuma(entry);
-			entry = pmd_modify(entry, newprot);
-			ret = HPAGE_PMD_NR;
-			set_pmd_at(mm, addr, pmd, entry);
-			BUG_ON(pmd_write(entry));
-		} else {
-			struct page *page = pmd_page(*pmd);
-
-			/*
-			 * Do not trap faults against the zero page. The
-			 * read-only data is likely to be read-cached on the
-			 * local CPU cache and it is less useful to know about
-			 * local vs remote hits on the zero page.
-			 */
-			if (!is_huge_zero_page(page) &&
-			    !pmd_protnone(*pmd)) {
-				pmdp_set_numa(mm, addr, pmd);
-				ret = HPAGE_PMD_NR;
-			}
-		}
+		entry = pmdp_get_and_clear_notify(mm, addr, pmd);
+		entry = pmd_modify(entry, newprot);
+		ret = HPAGE_PMD_NR;
+		set_pmd_at(mm, addr, pmd, entry);
+		BUG_ON(pmd_write(entry));
 		spin_unlock(ptl);
 	}
 
+6 −4
@@ -3018,9 +3018,9 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	* validation through pte_unmap_same(). It's of NUMA type but
 	* the pfn may be screwed if the read is non atomic.
 	*
-	* ptep_modify_prot_start is not called as this is clearing
-	* the _PAGE_NUMA bit and it is not really expected that there
-	* would be concurrent hardware modifications to the PTE.
+	* We can safely just do a "set_pte_at()", because the old
+	* page table entry is not accessible, so there would be no
+	* concurrent hardware modifications to the PTE.
 	*/
 	ptl = pte_lockptr(mm, pmd);
 	spin_lock(ptl);
@@ -3029,7 +3029,9 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out;
 	}
 
-	pte = pte_mknonnuma(pte);
+	/* Make it present again */
+	pte = pte_modify(pte, vma->vm_page_prot);
+	pte = pte_mkyoung(pte);
 	set_pte_at(mm, addr, ptep, pte);
 	update_mmu_cache(vma, addr, ptep);
 
+1 −1
@@ -569,7 +569,7 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 {
 	int nr_updated;
 
-	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
+	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
 	if (nr_updated)
 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
 