
Commit 16c2d476 authored by Benjamin Herrenschmidt, committed by Paul Mackerras

[POWERPC] Add ability to 4K kernel to hash in 64K pages



This adds the ability for a kernel compiled with a 4K page size
to have special slices containing 64K pages, and to hash in the right
type of hash PTEs for those slices.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent d0f13e3c
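
The hunks below boil down to two mechanics in hash_page()/hash_preload(): the per-address page size is now taken from the slice map, and a 4K-page kernel aligns the effective address to the slice page size before the PTE lookup, then inserts a 64K hash PTE when CONFIG_PPC_HAS_HASH_64K is selected. The following user-space sketch models that flow; it is not code from the patch. get_slice_psize(), mmu_psize_defs[] and the __hash_page_4K/__hash_page_64K calls are simplified stand-ins for the kernel symbols, and the 1 TB slice threshold is invented for the demo.

/*
 * Illustrative user-space sketch (not part of the patch) of what this
 * commit enables on a 4K-page kernel:
 * 1) the per-address page size comes from the slice map, so special
 *    slices (e.g. SPE mappings) can be backed by 64K pages;
 * 2) for such a slice the effective address is aligned to the slice
 *    page size before the PTE lookup, and the 64K hash flavour is used
 *    when CONFIG_PPC_HAS_HASH_64K is available.
 * All identifiers are simplified stand-ins for the kernel ones.
 */
#include <stdint.h>
#include <stdio.h>

enum { MMU_PAGE_4K, MMU_PAGE_64K };

/* Stand-in for mmu_psize_defs[]: only the page shift matters here. */
static const struct { unsigned int shift; } mmu_psize_defs[] = {
	[MMU_PAGE_4K]  = { .shift = 12 },
	[MMU_PAGE_64K] = { .shift = 16 },
};

/* Stand-in for get_slice_psize(): pretend addresses at or above 1 TB sit
 * in a 64K slice, everything below in ordinary 4K slices. */
static int get_slice_psize(uint64_t ea)
{
	return ea >= ((uint64_t)1 << 40) ? MMU_PAGE_64K : MMU_PAGE_4K;
}

static void hash_one(uint64_t ea)
{
	int psize = get_slice_psize(ea);

	/* 4K kernel hitting a special 64K mapping: align the address to
	 * the slice page size before fetching the PTE (see the hash_page()
	 * hunk below). */
	if (psize != MMU_PAGE_4K)
		ea &= ~(((uint64_t)1 << mmu_psize_defs[psize].shift) - 1);

	/* Dispatch on the slice page size, as the patched code does under
	 * CONFIG_PPC_HAS_HASH_64K. */
	if (psize == MMU_PAGE_64K)
		printf("__hash_page_64K(ea=0x%llx)\n", (unsigned long long)ea);
	else
		printf("__hash_page_4K(ea=0x%llx)\n", (unsigned long long)ea);
}

int main(void)
{
	hash_one(0x10001234ull);	/* ordinary 4K user mapping     */
	hash_one(0x10000005678ull);	/* pretend 64K "driver" mapping */
	return 0;
}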
+6 −0
@@ -546,9 +546,15 @@ config NODES_SPAN_OTHER_NODES
	def_bool y
	depends on NEED_MULTIPLE_NODES

config PPC_HAS_HASH_64K
	bool
	depends on PPC64
	default n

config PPC_64K_PAGES
	bool "64k page size"
	depends on PPC64
	select PPC_HAS_HASH_64K
	help
	  This option changes the kernel logical page size to 64k. On machines
	  without processor support for 64k pages, the kernel will simulate
+4 −1
@@ -615,6 +615,9 @@ htab_pte_insert_failure:
	li	r3,-1
	b	htab_bail

#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K

/*****************************************************************************
 *                                                                           *
@@ -870,7 +873,7 @@ ht64_pte_insert_failure:
	b	ht64_bail


#endif /* CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC_HAS_HASH_64K */


/*****************************************************************************
+27 −12
@@ -420,7 +420,7 @@ static void __init htab_finish_init(void)
	extern unsigned int *htab_call_hpte_remove;
	extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_64K_PAGES
#ifdef CONFIG_PPC_HAS_HASH_64K
	extern unsigned int *ht64_call_hpte_insert1;
	extern unsigned int *ht64_call_hpte_insert2;
	extern unsigned int *ht64_call_hpte_remove;
@@ -648,7 +648,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
			return 1;
		}
		vsid = get_vsid(mm->context.id, ea);
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
@@ -678,13 +682,21 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)

#ifdef CONFIG_HUGETLB_PAGE
	/* Handle hugepage regions */
	if (HPAGE_SHIFT &&
	    unlikely(get_slice_psize(mm, ea) == mmu_huge_psize)) {
	if (HPAGE_SHIFT && psize == mmu_huge_psize) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local, trap);
	}
#endif /* CONFIG_HUGETLB_PAGE */

#ifndef CONFIG_PPC_64K_PAGES
	/* If we use 4K pages and our psize is not 4K, then we are hitting
	 * a special driver mapping, we need to align the address before
	 * we fetch the PTE
	 */
	if (psize != MMU_PAGE_4K)
		ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
@@ -707,9 +719,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
	}

	/* Do actual hashing */
#ifndef CONFIG_PPC_64K_PAGES
	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
#ifdef CONFIG_PPC_64K_PAGES
	/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
	if (pte_val(*ptep) & _PAGE_4K_PFN) {
		demote_segment_4k(mm, ea);
@@ -751,12 +761,14 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
			mmu_psize_defs[mmu_vmalloc_psize].sllp;
		slb_flush_and_rebolt();
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
@@ -812,19 +824,22 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
	/* Get VSID */
	vsid = get_vsid(mm->context.id, ea);

	/* Hash it in */
	/* Hash doesn't like irqs */
	local_irq_save(flags);

	/* Is that local to this CPU ? */
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;
#ifndef CONFIG_PPC_64K_PAGES
	__hash_page_4K(ea, access, vsid, ptep, trap, local);
#else

	/* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K
	if (mm->context.user_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		__hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */
		__hash_page_4K(ea, access, vsid, ptep, trap, local);

	local_irq_restore(flags);
}

+9 −3
@@ -143,16 +143,22 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller) */
	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = mmu_huge_psize;
#else
		BUG();
		psize = pte_pagesize_index(pte); /* shutup gcc */
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(pte);
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
+5 −1
@@ -80,7 +80,11 @@

#define pte_iterate_hashed_end() } while(0)

#define pte_pagesize_index(pte)	MMU_PAGE_4K
#ifdef CONFIG_PPC_HAS_HASH_64K
#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
#else
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
#endif

/*
 * 4-level page tables related bits