
Commit 691e95fd authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/thp: Make page table walk safe against thp split/collapse



We can prevent a THP split or a hugepage collapse by disabling irq.
We send an IPI to all the cpus in the early part of split/collapse,
so disabling the local irq ensures we don't make progress with the
split/collapse. If the THP is getting split we return NULL from
find_linux_pte_or_hugepte(). This should be ok for all the current
callers. We need to be careful if we want to use the returned pte_t
pointer outside the irq-disabled region. W.r.t. a THP split the pfn
remains the same, but a hugepage collapse will result in a pfn change.
There are a few steps we can take to avoid a hugepage collapse. One
way is to take a page reference inside the irq-disabled region.
Another option is to take mmap_sem so that a parallel collapse will
not happen. We can also disable collapse by taking the pmd_lock.
Another method, used by the kvm subsystem, is to check whether there
was an mmu_notifier update in between, using mmu_notifier_retry().
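
For illustration, a minimal sketch of the caller pattern this change establishes
is shown below; the helper example_lookup_pfn and its arguments are hypothetical
and not part of this patch. The lookup and any dereference of the returned pte_t
pointer stay inside the irq-disabled window, and a NULL return (THP under split)
is handled before use.

/*
 * Hypothetical caller, for illustration only -- not part of this patch.
 */
static unsigned long example_lookup_pfn(struct mm_struct *mm, unsigned long ea)
{
	unsigned long pfn = 0;
	unsigned long flags;
	unsigned int shift;
	pte_t *ptep;

	local_irq_save(flags);		/* split/collapse cannot complete while irqs are off */
	ptep = find_linux_pte_or_hugepte(mm->pgd, ea, &shift);
	if (ptep)			/* NULL means the THP is being split */
		pfn = pte_pfn(*ptep);	/* only dereference while irqs are disabled */
	local_irq_restore(flags);
	/*
	 * The pfn is stable across a THP split, but a later hugepage
	 * collapse can change it; take a page reference, hold mmap_sem or
	 * the pmd_lock, or use mmu_notifier_retry() if it is used later.
	 */
	return pfn;
}

Real-mode callers (as in book3s_hv_rm_mmu.c below) already run with interrupts
disabled and therefore call __find_linux_pte_or_hugepte() directly.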

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent dac56570
+10 −1
@@ -247,8 +247,17 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
#define pmd_large(pmd)		0
#define has_transparent_hugepage() 0
#endif
-pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				 unsigned *shift);
+static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+					       unsigned *shift)
+{
+	if (!arch_irqs_disabled()) {
+		pr_info("%s called with irq enabled\n", __func__);
+		dump_stack();
+	}
+	return __find_linux_pte_or_hugepte(pgdir, ea, shift);
+}
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
+4 −2
@@ -334,9 +334,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
	int hugepage_shift;

	/*
-	 * We won't find hugepages here, iomem
+	 * We won't find hugepages here(this is iomem). Hence we are not
+	 * worried about _PAGE_SPLITTING/collapse. Also we will not hit
+	 * page table free, because of init_mm.
	 */
-	ptep = find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift);
+	ptep = __find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift);
	if (!ptep)
		return token;
	WARN_ON(hugepage_shift);
+5 −5
@@ -71,15 +71,15 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
		vaddr = (unsigned long)PCI_FIX_ADDR(addr);
		if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
			return NULL;

-		ptep = find_linux_pte_or_hugepte(init_mm.pgd, vaddr,
+		/*
+		 * We won't find huge pages here (iomem). Also can't hit
+		 * a page table free due to init_mm
+		 */
+		ptep = __find_linux_pte_or_hugepte(init_mm.pgd, vaddr,
						 &hugepage_shift);
		if (ptep == NULL)
			paddr = 0;
		else {
-			/*
-			 * we don't have hugepages backing iomem
-			 */
			WARN_ON(hugepage_shift);
			paddr = pte_pfn(*ptep) << PAGE_SHIFT;
		}
+3 −2
@@ -539,12 +539,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
		if (!writing && hpte_is_writable(r)) {
			unsigned int hugepage_shift;
			pte_t *ptep, pte;
+			unsigned long flags;

			/*
			 * We need to protect against page table destruction
			 * while looking up and updating the pte.
			 */
-			rcu_read_lock_sched();
+			local_irq_save(flags);
			ptep = find_linux_pte_or_hugepte(current->mm->pgd,
							 hva, &hugepage_shift);
			if (ptep) {
@@ -553,7 +554,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				if (pte_write(pte))
					write_ok = 1;
			}
-			rcu_read_unlock_sched();
+			local_irq_restore(flags);
		}
	}

+23 −7
@@ -26,11 +26,14 @@ static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

-	p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
+	/*
+	 * assume we don't have huge pages in vmalloc space...
+	 * So don't worry about THP collapse/split. Called
+	 * Only in realmode, hence won't need irq_save/restore.
+	 */
+	p = __find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
	if (!p || !pte_present(*p))
		return NULL;
-	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}
@@ -153,7 +156,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
	pte_t *ptep;
	unsigned int writing;
	unsigned long mmu_seq;
-	unsigned long rcbits;
+	unsigned long rcbits, irq_flags = 0;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
@@ -189,7 +192,16 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,

	/* Translate to host virtual address */
	hva = __gfn_to_hva_memslot(memslot, gfn);
-	ptep = find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
+	/*
+	 * If we had a page table table change after lookup, we would
+	 * retry via mmu_notifier_retry.
+	 */
+	if (realmode)
+		ptep = __find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
+	else {
+		local_irq_save(irq_flags);
+		ptep = find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
+	}
	if (ptep) {
		pte_t pte;
		unsigned int host_pte_size;
@@ -202,9 +214,11 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		 * We should always find the guest page size
		 * to <= host page size, if host is using hugepage
		 */
-		if (host_pte_size < psize)
+		if (host_pte_size < psize) {
+			if (!realmode)
+				local_irq_restore(flags);
			return H_PARAMETER;

+		}
		pte = kvmppc_read_update_linux_pte(ptep, writing, hpage_shift);
		if (pte_present(pte) && !pte_protnone(pte)) {
			if (writing && !pte_write(pte))
@@ -216,6 +230,8 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			pa |= gpa & ~PAGE_MASK;
		}
	}
+	if (!realmode)
+		local_irq_restore(irq_flags);

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;