
Commit c7d54842 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm: Use _PAGE_READ to indicate Read access



This splits the _PAGE_RW bit into _PAGE_READ and _PAGE_WRITE. It also
removes the dependency on _PAGE_USER for implying read-only. One thing
to note is that read permission is implied by write and execute
permission, so we should always find _PAGE_READ set on a hash pte
fault.
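
As a minimal illustration of that invariant (a hypothetical helper, not
part of this patch): any pte granting write or execute access must also
grant read.

	/* Hypothetical check: write/execute permission implies read,
	 * so the hash pte fault path can rely on _PAGE_READ being set. */
	static inline int pte_read_implied(unsigned long ptev)
	{
		if (ptev & (_PAGE_WRITE | _PAGE_EXEC))
			return (ptev & _PAGE_READ) != 0;
		return 1;
	}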

We still can't switch PROT_NONE to !(_PAGE_RWX), because automatic NUMA
balancing depends on marking a prot-none pte _PAGE_WRITE. (For more
details see commit b191f9b1 "mm: numa: preserve PTE write permissions
across a NUMA hinting fault".)

Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Jeremy Kerr <jk@ozlabs.org>
Cc: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Acked-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent ee3caed3
+2 −2
@@ -291,10 +291,10 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pmd_t *pmdp)
{

-	if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
+	if ((pmd_val(*pmdp) & _PAGE_WRITE) == 0)
		return;

-	pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
+	pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
}

#endif /*  CONFIG_TRANSPARENT_HUGEPAGE */
+21 −14
@@ -16,8 +16,10 @@
#define _PAGE_BIT_SWAP_TYPE	0

#define _PAGE_EXEC		0x00001 /* execute permission */
-#define _PAGE_RW		0x00002 /* read & write access allowed */
+#define _PAGE_WRITE		0x00002 /* write access allowed */
+#define _PAGE_READ		0x00004	/* read access allowed */
+#define _PAGE_RW		(_PAGE_READ | _PAGE_WRITE)
+#define _PAGE_RWX		(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_USER		0x00008 /* page may be accessed by userspace */
#define _PAGE_GUARDED		0x00010 /* G: guarded (side-effect) page */
/* M (memory coherence) is always set in the HPTE, so we don't need it here */
@@ -145,8 +147,8 @@
 */
#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU | _PAGE_4K_PFN | \
-			 _PAGE_USER | _PAGE_ACCESSED |  \
-			 _PAGE_RW |  _PAGE_DIRTY | _PAGE_EXEC | \
+			 _PAGE_USER | _PAGE_ACCESSED |  _PAGE_READ |\
+			 _PAGE_WRITE |  _PAGE_DIRTY | _PAGE_EXEC | \
			 _PAGE_SOFT_DIRTY)
/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
@@ -171,10 +173,12 @@
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
				 _PAGE_EXEC)
-#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER )
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER )
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ)
+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ| \
+				 _PAGE_EXEC)
+#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ)
+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ| \
+				 _PAGE_EXEC)

#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
@@ -296,19 +300,19 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{

-	if ((pte_val(*ptep) & _PAGE_RW) == 0)
+	if ((pte_val(*ptep) & _PAGE_WRITE) == 0)
		return;

-	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
+	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
-	if ((pte_val(*ptep) & _PAGE_RW) == 0)
+	if ((pte_val(*ptep) & _PAGE_WRITE) == 0)
		return;

-	pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
+	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
}

/*
@@ -349,7 +353,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

-	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
+	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;
@@ -384,7 +388,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)


/* Generic accessors to PTE bits */
-static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
+static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_WRITE);}
static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
@@ -445,7 +449,7 @@ static inline unsigned long pte_pfn(pte_t pte)
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
-	return __pte(pte_val(pte) & ~_PAGE_RW);
+	return __pte(pte_val(pte) & ~_PAGE_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
@@ -460,6 +464,9 @@ static inline pte_t pte_mkold(pte_t pte)

static inline pte_t pte_mkwrite(pte_t pte)
{
+	/*
+	 * write implies read, hence set both
+	 */
	return __pte(pte_val(pte) | _PAGE_RW);
}

+5 −0
@@ -198,3 +198,8 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

+#ifndef _PAGE_READ
+/* if not defined, we should not find _PAGE_WRITE too */
+#define _PAGE_READ 0
+#define _PAGE_WRITE _PAGE_RW
+#endif
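
With this fallback, generic code can test the new bit names
unconditionally; a hypothetical caller:

	/* Hypothetical: on platforms without a separate read bit,
	 * _PAGE_READ is 0 and _PAGE_WRITE aliases _PAGE_RW, so this
	 * reduces to the old _PAGE_RW check. */
	static inline int pte_is_writable(pte_t pte)
	{
		return !!(pte_val(pte) & _PAGE_WRITE);
	}
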
+1 −1
@@ -45,7 +45,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
		 * also add _PAGE_COMBO
		 */
		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
-		if (access & _PAGE_RW)
+		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

+2 −2
@@ -77,7 +77,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
		 * also add _PAGE_COMBO
		 */
		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_COMBO;
-		if (access & _PAGE_RW)
+		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

@@ -251,7 +251,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
		 * a write access.
		 */
		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
-		if (access & _PAGE_RW)
+		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
