
Commit 94171b19 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm: Rename find_linux_pte_or_hugepte()



Add new helpers to make the function usage simpler. It is always
recommended to use find_current_mm_pte() for walking the page table.
If we cannot use find_current_mm_pte(), it should be documented why
that use of __find_linux_pte() is safe against a parallel THP
split.

For now we have KVM code using __find_linux_pte(). This is because KVM
ends up calling __find_linux_pte() in real mode with MSR_EE=0 but
with PACA soft_enabled = 1. We may want to fix that later and make
sure we keep MSR_EE and PACA soft_enabled in sync. When we do that
we can switch KVM to use find_linux_pte().
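
As an illustration only (not part of this commit), a minimal sketch of how a
caller might use the new find_current_mm_pte() helper for a lockless walk of
the current mm. The function name example_ea_to_pfn() and the pfn lookup are
hypothetical; the point is that interrupts stay disabled for the whole walk:

#include <linux/mm.h>
#include <linux/irqflags.h>
#include <asm/pte-walk.h>

/*
 * Hypothetical example: translate a user effective address in the current
 * mm to a pfn without taking the page table lock. Keeping interrupts off
 * is what makes the walk safe against a parallel THP split or page table
 * free.
 */
static unsigned long example_ea_to_pfn(unsigned long ea)
{
	unsigned long flags, pfn = 0;
	unsigned int hshift;
	bool is_thp;
	pte_t *ptep;

	local_irq_save(flags);
	ptep = find_current_mm_pte(current->mm->pgd, ea, &is_thp, &hshift);
	if (ptep && pte_present(*ptep))
		pfn = pte_pfn(*ptep);
	local_irq_restore(flags);

	return pfn;
}

In real-mode paths such as the KVM case described above, where MSR_EE and
PACA soft_enabled can disagree, the raw __find_linux_pte() would still be
needed, together with a comment explaining why the walk is safe against a
parallel THP split.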

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 520eccdf
+1 −9
@@ -66,16 +66,8 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_large(pmd)		0
 #endif
-pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
-				   bool *is_thp, unsigned *shift);
-static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
-					       bool *is_thp, unsigned *shift)
-{
-	VM_WARN(!arch_irqs_disabled(),
-		"%s called with irq enabled\n", __func__);
-	return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift);
-}
 
+/* can we use this in kvm */
 unsigned long vmalloc_to_phys(void *vmalloc_addr);
 
 void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
+35 −0
#ifndef _ASM_POWERPC_PTE_WALK_H
#define _ASM_POWERPC_PTE_WALK_H

#include <linux/sched.h>

/* Don't use this directly */
extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			       bool *is_thp, unsigned *hshift);

static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
				    bool *is_thp, unsigned *hshift)
{
	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
	return __find_linux_pte(pgdir, ea, is_thp, hshift);
}

static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
{
	pgd_t *pgdir = init_mm.pgd;
	return __find_linux_pte(pgdir, ea, NULL, hshift);
}
/*
 * This is what we should always use. Any other lockless page table lookup needs
 * careful audit against THP split.
 */
static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
					 bool *is_thp, unsigned *hshift)
{
	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
	VM_WARN(pgdir != current->mm->pgd,
		"%s lock less page table lookup called on wrong mm\n", __func__);
	return __find_linux_pte(pgdir, ea, is_thp, hshift);
}

#endif /* _ASM_POWERPC_PTE_WALK_H */
+2 −2
@@ -44,6 +44,7 @@
 #include <asm/machdep.h>
 #include <asm/ppc-pci.h>
 #include <asm/rtas.h>
+#include <asm/pte-walk.h>
 
 
 /** Overview:
@@ -352,8 +353,7 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
 	 * worried about _PAGE_SPLITTING/collapse. Also we will not hit
 	 * page table free, because of init_mm.
 	 */
-	ptep = __find_linux_pte_or_hugepte(init_mm.pgd, token,
-					   NULL, &hugepage_shift);
+	ptep = find_init_mm_pte(token, &hugepage_shift);
 	if (!ptep)
 		return token;
 	WARN_ON(hugepage_shift);
+3 −2
@@ -19,6 +19,8 @@
 #include <asm/pgtable.h>
 #include <asm/ppc-pci.h>
 #include <asm/io-workarounds.h>
+#include <asm/pte-walk.h>
+
 
 #define IOWA_MAX_BUS	8
 
@@ -75,8 +77,7 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 		 * We won't find huge pages here (iomem). Also can't hit
 		 * a page table free due to init_mm
 		 */
-		ptep = __find_linux_pte_or_hugepte(init_mm.pgd, vaddr,
-						   NULL, &hugepage_shift);
+		ptep = find_init_mm_pte(vaddr, &hugepage_shift);
 		if (ptep == NULL)
 			paddr = 0;
 		else {
+3 −2
@@ -37,6 +37,7 @@
 #include <asm/synch.h>
 #include <asm/ppc-opcode.h>
 #include <asm/cputable.h>
+#include <asm/pte-walk.h>
 
 #include "trace_hv.h"
 
@@ -597,7 +598,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			 * hugepage split and collapse.
 			 */
 			local_irq_save(flags);
-			ptep = find_linux_pte_or_hugepte(current->mm->pgd,
-						   hva, NULL, NULL);
+			ptep = find_current_mm_pte(current->mm->pgd,
+						   hva, NULL, NULL);
 			if (ptep) {
 				pte = kvmppc_read_update_linux_pte(ptep, 1);