
Commit ebd31197 authored by Oliver O'Halloran, committed by Michael Ellerman

powerpc/mm: Add devmap support for ppc64



Add support for the devmap bit on PTEs and PMDs for PPC64 Book3S.  This
is used to differentiate device backed memory from transparent huge
pages since they are handled in more or less the same manner by the core
mm code.

Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent b584c254
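In practice, the new bit lets generic page-table walkers tell a ZONE_DEVICE huge mapping apart from an ordinary transparent huge page. Below is a minimal, hypothetical sketch of such a check. It is not part of this commit: example_follow_huge_pmd is an invented name, the surrounding includes/locking are assumed, and real callers pin device pages through the dev_pagemap machinery rather than returning NULL.

/* Hypothetical illustration only, not from this commit. */
static struct page *example_follow_huge_pmd(pmd_t pmd, unsigned long addr)
{
	if (pmd_devmap(pmd))
		/* device-backed (ZONE_DEVICE) memory, e.g. persistent memory */
		return NULL;

	if (pmd_trans_huge(pmd))
		/* ordinary THP: compute the subpage within the huge page */
		return pmd_page(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);

	return NULL;	/* not a huge leaf entry */
}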
+45 −0
@@ -5,6 +5,7 @@

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/bug.h>
#endif

/*
@@ -79,6 +80,9 @@

#define _PAGE_SOFT_DIRTY	_RPAGE_SW3 /* software: software dirty tracking */
#define _PAGE_SPECIAL		_RPAGE_SW2 /* software: special page */
#define _PAGE_DEVMAP		_RPAGE_SW1 /* software: ZONE_DEVICE page */
#define __HAVE_ARCH_PTE_DEVMAP

/*
 * Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE
 * Instead of fixing all of them, add an alternate define which
@@ -599,6 +603,16 @@ static inline pte_t pte_mkhuge(pte_t pte)
	return pte;
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline int pte_devmap(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DEVMAP));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/* FIXME!! check whether this need to be a conditional */
@@ -1146,6 +1160,37 @@ static inline bool arch_needs_pgtable_deposit(void)
	return true;
}


static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
}

static inline int pmd_devmap(pmd_t pmd)
{
	return pte_devmap(pmd_pte(pmd));
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline const int pud_pfn(pud_t pud)
{
	/*
	 * Currently all calls to pud_pfn() are gated around a pud_devmap()
	 * check so this should never be used. If it grows another user we
	 * want to know about it.
	 */
	BUILD_BUG();
	return 0;
}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
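For context (commentary, not part of the diff): pte_mkdevmap() and pmd_mkdevmap() are consumed by the generic mm code when a driver inserts ZONE_DEVICE pfns into a user mapping. The sketch below shows that call pattern in simplified form, modelled loosely on the PMD insert path in mm/huge_memory.c; example_insert_devmap_pmd is an invented name, and locking, write/dirty bits and TLB maintenance are omitted.

static void example_insert_devmap_pmd(struct vm_area_struct *vma,
				      unsigned long addr, pmd_t *pmdp,
				      pfn_t pfn, pgprot_t prot)
{
	/* Build a huge leaf entry for the device pfn... */
	pmd_t entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));

	/* ...and tag it so it is never mistaken for an anonymous THP. */
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);

	set_pmd_at(vma->vm_mm, addr, pmdp, entry);
}

Because such entries now reach set_pmd_at() and friends, the debug checks in the pgtable code further down are relaxed to accept devmap PMDs as well as THP PMDs.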
+1 −1
@@ -252,7 +252,7 @@ static inline int radix__pgd_bad(pgd_t pgd)

static inline int radix__pmd_trans_huge(pmd_t pmd)
{
-	return !!(pmd_val(pmd) & _PAGE_PTE);
+	return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
}

static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
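The change above narrows radix__pmd_trans_huge(): a devmap PMD also carries _PAGE_PTE, so the old test would have reported it as a transparent huge page. A small standalone illustration of the two predicates follows, using placeholder bit positions rather than the real _RPAGE_* encodings:

#include <stdio.h>
#include <stdint.h>

#define _PAGE_PTE	(1ULL << 62)	/* placeholder, not the real encoding */
#define _PAGE_DEVMAP	(1ULL << 61)	/* placeholder, not the real encoding */

static int old_test(uint64_t pmd) { return !!(pmd & _PAGE_PTE); }

static int new_test(uint64_t pmd)
{
	return (pmd & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
}

int main(void)
{
	uint64_t thp = _PAGE_PTE;			/* ordinary THP entry */
	uint64_t dev = _PAGE_PTE | _PAGE_DEVMAP;	/* devmap entry */

	printf("THP:    old=%d new=%d\n", old_test(thp), new_test(thp));	/* 1 1 */
	printf("devmap: old=%d new=%d\n", old_test(dev), new_test(dev));	/* 1 0 */
	return 0;
}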
+1 −1
@@ -964,7 +964,7 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
			if (pmd_none(pmd))
				return NULL;

-			if (pmd_trans_huge(pmd)) {
+			if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
				if (is_thp)
					*is_thp = true;
				ret_pte = (pte_t *) pmdp;
+2 −2
@@ -32,7 +32,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
{
	int changed;
#ifdef CONFIG_DEBUG_VM
-	WARN_ON(!pmd_trans_huge(*pmdp));
+	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
	changed = !pmd_same(*(pmdp), entry);
@@ -59,7 +59,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(&mm->page_table_lock);
-	WARN_ON(!pmd_trans_huge(pmd));
+	WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
+3 −1
@@ -184,7 +184,7 @@ unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
-	WARN_ON(!pmd_trans_huge(*pmdp));
+	WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

@@ -216,6 +216,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
+	VM_BUG_ON(pmd_devmap(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);
@@ -296,6 +297,7 @@ void hash__pmdp_huge_split_prepare(struct vm_area_struct *vma,
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
+	VM_BUG_ON(pmd_devmap(*pmdp));

	/*
	 * We can't mark the pmd none here, because that will cause a race