
Commit d8e7a33d authored by Gerald Schaefer, committed by Martin Schwidefsky

s390/thp: respect page protection in pmd_none() and pmd_present()



Similar to pte_none() and pte_present(), the pmd functions should also
respect page protection of huge pages, especially PROT_NONE.
This patch also simplifies massage_pgprot_pmd() by adding new definitions
for huge page protection.

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 156152f8
+22 −13
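For readers who want the logic outside of kernel context, here is a minimal user-space sketch of the encoding the new checks rely on. The bit values below are illustrative assumptions, not the real s390 header definitions; the only property the patch depends on is that a PROT_NONE huge page (_HPAGE_TYPE_NONE) keeps the invalid bit set, so it still faults, while also carrying the read-only bit, which distinguishes it from a truly empty entry.

/*
 * Illustrative sketch only: bit positions are assumptions made up for
 * this example, not the values from the s390 headers. pmd_t is reduced
 * to a plain unsigned long so the checks can run in user space.
 */
#include <assert.h>

#define _SEGMENT_ENTRY_INV	0x020UL		/* assumed invalid/fault bit */
#define _SEGMENT_ENTRY_RO	0x200UL		/* assumed write-protect bit */
#define _HPAGE_TYPE_NONE	(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO)

static int pmd_present(unsigned long pmd)	/* logic as in the patch */
{
	unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;

	return (pmd & mask) == _HPAGE_TYPE_NONE || !(pmd & _SEGMENT_ENTRY_INV);
}

static int pmd_none(unsigned long pmd)		/* logic as in the patch */
{
	return (pmd & _SEGMENT_ENTRY_INV) && !(pmd & _SEGMENT_ENTRY_RO);
}

int main(void)
{
	unsigned long empty = _SEGMENT_ENTRY_INV;	/* no mapping          */
	unsigned long none  = _HPAGE_TYPE_NONE;		/* PROT_NONE huge page */
	unsigned long rw    = 0UL;			/* valid, writable     */

	assert(pmd_none(empty) && !pmd_present(empty));
	assert(!pmd_none(none) && pmd_present(none));	/* the point of the fix */
	assert(!pmd_none(rw) && pmd_present(rw));
	return 0;
}

With the previous checks, the PROT_NONE case above would have been reported as not present, since the old pmd_present() only tested _SEGMENT_ENTRY_ORIGIN.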
@@ -506,12 +506,15 @@ static inline int pud_bad(pud_t pud)
 
 static inline int pmd_present(pmd_t pmd)
 {
-	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
+	unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
+	return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
+	       !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
 }
 
 static inline int pmd_none(pmd_t pmd)
 {
-	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
+	       !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
 }
 
 static inline int pmd_large(pmd_t pmd)
@@ -1223,6 +1226,11 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#define SEGMENT_NONE	__pgprot(_HPAGE_TYPE_NONE)
+#define SEGMENT_RO	__pgprot(_HPAGE_TYPE_RO)
+#define SEGMENT_RW	__pgprot(_HPAGE_TYPE_RW)
+
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
 
@@ -1242,16 +1250,15 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 
 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 {
-	unsigned long pgprot_pmd = 0;
-
-	if (pgprot_val(pgprot) & _PAGE_INVALID) {
-		if (pgprot_val(pgprot) & _PAGE_SWT)
-			pgprot_pmd |= _HPAGE_TYPE_NONE;
-		pgprot_pmd |= _SEGMENT_ENTRY_INV;
-	}
-	if (pgprot_val(pgprot) & _PAGE_RO)
-		pgprot_pmd |= _SEGMENT_ENTRY_RO;
-	return pgprot_pmd;
+	/*
+	 * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
+	 * Convert to segment table entry format.
+	 */
+	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
+		return pgprot_val(SEGMENT_NONE);
+	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
+		return pgprot_val(SEGMENT_RO);
+	return pgprot_val(SEGMENT_RW);
 }
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
@@ -1269,6 +1276,8 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
 {
-	pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
+	/* Do not clobber _HPAGE_TYPE_NONE pages! */
+	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
 	return pmd;
 }
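
The guard added to pmd_mkwrite() follows from the same encoding: unconditionally clearing _SEGMENT_ENTRY_RO on a _HPAGE_TYPE_NONE entry would leave only _SEGMENT_ENTRY_INV set, which the new pmd_none() reads as an empty entry. A short sketch of that hazard, again using illustrative bit values rather than the real header definitions:

/* Illustrative values only, matching the sketch above. */
#include <assert.h>

#define _SEGMENT_ENTRY_INV	0x020UL
#define _SEGMENT_ENTRY_RO	0x200UL
#define _HPAGE_TYPE_NONE	(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO)

static unsigned long mkwrite_old(unsigned long pmd)
{
	return pmd & ~_SEGMENT_ENTRY_RO;	/* always clears RO */
}

static unsigned long mkwrite_new(unsigned long pmd)
{
	/* Patched behaviour: leave invalid (PROT_NONE) entries untouched. */
	if (!(pmd & _SEGMENT_ENTRY_INV))
		pmd &= ~_SEGMENT_ENTRY_RO;
	return pmd;
}

static int pmd_none(unsigned long pmd)
{
	return (pmd & _SEGMENT_ENTRY_INV) && !(pmd & _SEGMENT_ENTRY_RO);
}

int main(void)
{
	/* Old helper would degrade a PROT_NONE entry into "no mapping". */
	assert(pmd_none(mkwrite_old(_HPAGE_TYPE_NONE)));
	/* Guarded helper keeps the entry recognisable as PROT_NONE.     */
	assert(!pmd_none(mkwrite_new(_HPAGE_TYPE_NONE)));
	return 0;
}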