Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e7bb4b6d authored by Mel Gorman's avatar Mel Gorman Committed by Linus Torvalds
Browse files

mm: add p[te|md] protnone helpers for use by NUMA balancing



This is a preparatory patch that introduces protnone helpers for automatic
NUMA balancing.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Jones <davej@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kirill Shutemov <kirill.shutemov@linux.intel.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5d833062
Loading
Loading
Loading
Loading
+16 −0
Original line number Original line Diff line number Diff line
@@ -40,6 +40,22 @@ static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK)
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }


#ifdef CONFIG_NUMA_BALANCING
#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h . On powerpc, this will only
 * work for user pages and always return true for kernel pages.
 */
/*
 * A protnone pte here is one that is present but has _PAGE_USER clear,
 * i.e. it faults on user access despite being mapped. Per the comment
 * block above, kernel pages therefore always report protnone; callers
 * are expected to only care about user mappings.
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PRESENT) &&
	       !(pte_val(pte) & _PAGE_USER);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* View the pmd as a pte and reuse the pte-level protnone check. */
	pte_t entry = pmd_pte(pmd);

	return pte_protnone(entry);
}

static inline int pte_present(pte_t pte)
static inline int pte_present(pte_t pte)
{
{
	return pte_val(pte) & _PAGE_NUMA_MASK;
	return pte_val(pte) & _PAGE_NUMA_MASK;
+16 −0
Original line number Original line Diff line number Diff line
@@ -483,6 +483,22 @@ static inline int pmd_present(pmd_t pmd)
				 _PAGE_NUMA);
				 _PAGE_NUMA);
}
}


#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
/*
 * Returns non-zero (the _PAGE_PROTNONE bit itself) when the pte is
 * marked PROTNONE, used by automatic NUMA balancing to detect hinting
 * faults. See the comment block above for the CONFIG-independence note.
 */
static inline int pte_protnone(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PROTNONE;
}

/*
 * pmd-level counterpart of pte_protnone(): non-zero when the huge-page
 * pmd carries _PAGE_PROTNONE. Checks the pmd's own flags directly
 * rather than converting to a pte first.
 */
static inline int pmd_protnone(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
static inline int pmd_none(pmd_t pmd)
{
{
	/* Only check low word on 32-bit platforms, since it might be
	/* Only check low word on 32-bit platforms, since it might be
+20 −0
Original line number Original line Diff line number Diff line
@@ -673,6 +673,26 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
#endif
#endif
}
}


#ifndef CONFIG_NUMA_BALANCING
/*
 * Technically a PTE can be PROTNONE even when not doing NUMA balancing but
 * the only case the kernel cares is for NUMA balancing and is only ever set
 * when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked
 * _PAGE_PROTNONE so by default, implement the helper as "always no". It
 * is the responsibility of the caller to distinguish between PROT_NONE
 * protections and NUMA hinting fault protections.
 */
static inline int pte_protnone(pte_t pte)
{
	/*
	 * Without CONFIG_NUMA_BALANCING no pte is ever marked
	 * _PAGE_PROTNONE for hinting purposes, so answer "no"
	 * unconditionally (see the comment block above).
	 */
	return 0;
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* Same as the pte stub: no NUMA balancing, never protnone. */
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_NUMA_BALANCING
#ifdef CONFIG_NUMA_BALANCING
/*
/*
 * _PAGE_NUMA distinguishes between an unmapped page table entry, an entry that
 * _PAGE_NUMA distinguishes between an unmapped page table entry, an entry that