Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 29409997 authored by Aneesh Kumar K.V, committed by Benjamin Herrenschmidt
Browse files

powerpc: move find_linux_pte_or_hugepte and gup_hugepte to common code



We will use this in the later patch for handling THP pages

Reviewed-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 074c2eae
Loading
Loading
Loading
Loading
+7 −1
Original line number Original line Diff line number Diff line
@@ -191,8 +191,14 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
				      unsigned long vmaddr)
{
{
}
}
#endif /* CONFIG_HUGETLB_PAGE */


#define hugepd_shift(x) 0
/*
 * !CONFIG_HUGETLB_PAGE stub: with hugetlb disabled there are never any
 * hugepd entries, so no huge pte can ever be found.  Return NULL rather
 * than a bare 0 for the pointer result.
 */
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */


/*
/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * FSL Book3E platforms require special gpage handling - the gpages
+0 −13
Original line number Original line Diff line number Diff line
@@ -368,19 +368,6 @@ static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
	return pt;
	return pt;
}
}


#ifdef CONFIG_HUGETLB_PAGE
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				 unsigned *shift);
#else
static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
					       unsigned *shift)
{
	if (shift)
		*shift = 0;
	return find_linux_pte(pgdir, ea);
}
#endif /* !CONFIG_HUGETLB_PAGE */

#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLY__ */


/*
/*
+2 −0
Original line number Original line Diff line number Diff line
@@ -224,6 +224,8 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
#define pmd_large(pmd)		0
#define pmd_large(pmd)		0
#define has_transparent_hugepage() 0
#define has_transparent_hugepage() 0
#endif
#endif
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				 unsigned *shift);
#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLY__ */


#endif /* __KERNEL__ */
#endif /* __KERNEL__ */
+1 −1
Original line number Original line Diff line number Diff line
@@ -27,8 +27,8 @@ obj-$(CONFIG_44x) += 44x_mmu.o
obj-$(CONFIG_PPC_FSL_BOOK3E)	+= fsl_booke_mmu.o
obj-$(CONFIG_PPC_FSL_BOOK3E)	+= fsl_booke_mmu.o
obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
obj-$(CONFIG_PPC_MM_SLICES)	+= slice.o
obj-$(CONFIG_PPC_MM_SLICES)	+= slice.o
ifeq ($(CONFIG_HUGETLB_PAGE),y)
obj-y				+= hugetlbpage.o
obj-y				+= hugetlbpage.o
ifeq ($(CONFIG_HUGETLB_PAGE),y)
obj-$(CONFIG_PPC_STD_MMU_64)	+= hugetlbpage-hash64.o
obj-$(CONFIG_PPC_STD_MMU_64)	+= hugetlbpage-hash64.o
obj-$(CONFIG_PPC_BOOK3E_MMU)	+= hugetlbpage-book3e.o
obj-$(CONFIG_PPC_BOOK3E_MMU)	+= hugetlbpage-book3e.o
endif
endif
+128 −123
Original line number Original line Diff line number Diff line
@@ -21,6 +21,9 @@
#include <asm/pgalloc.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>

#ifdef CONFIG_HUGETLB_PAGE


#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16M	24
@@ -100,66 +103,6 @@ int pgd_huge(pgd_t pgd)
}
}
#endif
#endif


/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, bottom two bits != 00
 * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
 */
/*
 * Walk the page tables for effective address @ea and return a pointer to
 * the pte, which may be a normal pte, a huge-page leaf pte, or a pte
 * inside a hugepd directory.  If @shift is non-NULL it is set to the
 * shift of the mapping found (0 for a normal pte, and 0 when nothing is
 * found).  Returns NULL if no mapping is present at any level.
 */
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	/* shift covered by the table level we are currently examining */
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	pg = pgdir + pgd_index(ea);

	if (pgd_huge(*pg)) {
		/* Case (3): leaf huge pte directly at the pgd level. */
		ret_pte = (pte_t *) pg;
		goto out;
	} else if (is_hugepd(pg))
		/* Case (4): hugepd directory at the pgd level. */
		hpdp = (hugepd_t *)pg;
	else if (!pgd_none(*pg)) {
		/* Case (2): pointer to the next table - descend to pud. */
		pdshift = PUD_SHIFT;
		pu = pud_offset(pg, ea);

		if (pud_huge(*pu)) {
			ret_pte = (pte_t *) pu;
			goto out;
		} else if (is_hugepd(pu))
			hpdp = (hugepd_t *)pu;
		else if (!pud_none(*pu)) {
			pdshift = PMD_SHIFT;
			pm = pmd_offset(pu, ea);

			if (pmd_huge(*pm)) {
				ret_pte = (pte_t *) pm;
				goto out;
			} else if (is_hugepd(pm))
				hpdp = (hugepd_t *)pm;
			else if (!pmd_none(*pm))
				/* Normal pte; *shift stays 0 from above. */
				return pte_offset_kernel(pm, ea);
		}
	}
	/* Case (1) (invalid) somewhere and no hugepd seen: no mapping. */
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (shift)
		*shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
{
	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
@@ -753,69 +696,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
	return NULL;
	return NULL;
}
}


/*
 * Pin the user pages in [addr, end) that are backed by the huge pte at
 * @ptep (huge page size @sz), storing them in @pages starting at index
 * *nr and incrementing *nr for each page recorded.
 * Returns 1 on success, 0 on failure (pte lacks the required access
 * bits, the speculative reference fails, or the pte changed under us).
 * The speculative reference plus the re-check of *ptep below guard
 * against the pte changing concurrently while we pin the pages.
 */
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page, *tail;
	pte_t pte;
	int refs;

	/* Clamp end so we never walk past this huge page. */
	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = *ptep;
	mask = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	/* Must be present + user-accessible (+ writable for a write gup). */
	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	/* First subpage of interest within the compound huge page. */
	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	/* Take all refs on the head page in one atomic bump. */
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	/* pte changed since we sampled it: drop the refs and bail out. */
	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail page need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
				      unsigned long sz)
{
{
@@ -1032,3 +912,128 @@ void flush_dcache_icache_hugepage(struct page *page)
		}
		}
	}
	}
}
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, bottom two bits != 00
 * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
 */
/*
 * Walk the page tables for effective address @ea and return a pointer to
 * the pte, which may be a normal pte, a huge-page leaf pte, or a pte
 * inside a hugepd directory.  If @shift is non-NULL it is set to the
 * shift of the mapping found (0 for a normal pte, and 0 when nothing is
 * found).  Returns NULL if no mapping is present at any level.
 */
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	/* shift covered by the table level we are currently examining */
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	pg = pgdir + pgd_index(ea);

	if (pgd_huge(*pg)) {
		/* Case (3): leaf huge pte directly at the pgd level. */
		ret_pte = (pte_t *) pg;
		goto out;
	} else if (is_hugepd(pg))
		/* Case (4): hugepd directory at the pgd level. */
		hpdp = (hugepd_t *)pg;
	else if (!pgd_none(*pg)) {
		/* Case (2): pointer to the next table - descend to pud. */
		pdshift = PUD_SHIFT;
		pu = pud_offset(pg, ea);

		if (pud_huge(*pu)) {
			ret_pte = (pte_t *) pu;
			goto out;
		} else if (is_hugepd(pu))
			hpdp = (hugepd_t *)pu;
		else if (!pud_none(*pu)) {
			pdshift = PMD_SHIFT;
			pm = pmd_offset(pu, ea);

			if (pmd_huge(*pm)) {
				ret_pte = (pte_t *) pm;
				goto out;
			} else if (is_hugepd(pm))
				hpdp = (hugepd_t *)pm;
			else if (!pmd_none(*pm))
				/* Normal pte; *shift stays 0 from above. */
				return pte_offset_kernel(pm, ea);
		}
	}
	/* Case (1) (invalid) somewhere and no hugepd seen: no mapping. */
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (shift)
		*shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);

/*
 * Pin the user pages in [addr, end) that are backed by the huge pte at
 * @ptep (huge page size @sz), storing them in @pages starting at index
 * *nr and incrementing *nr for each page recorded.
 * Returns 1 on success, 0 on failure (pte lacks the required access
 * bits, the speculative reference fails, or the pte changed under us).
 * The speculative reference plus the re-check of *ptep below guard
 * against the pte changing concurrently while we pin the pages.
 */
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page, *tail;
	pte_t pte;
	int refs;

	/* Clamp end so we never walk past this huge page. */
	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = *ptep;
	mask = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	/* Must be present + user-accessible (+ writable for a write gup). */
	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	/* First subpage of interest within the compound huge page. */
	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	/* Take all refs on the head page in one atomic bump. */
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	/* pte changed since we sampled it: drop the refs and bail out. */
	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail page need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}