Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2fd83038 authored by Helge Deller, committed by Kyle McMartin
Browse files

[PARISC] Further work for multiple page sizes



More work towards supporting multiple page sizes on 64-bit. Convert
some assumptions that 64bit uses 3 level page tables into testing
PT_NLEVELS. Also some BUG() to BUG_ON() conversions and some cleanups
to assembler.

Signed-off-by: Helge Deller <deller@parisc-linux.org>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
parent d668da80
Loading
Loading
Loading
Loading
+31 −0
Original line number Diff line number Diff line
@@ -138,6 +138,37 @@ config 64BIT
	  enable this option otherwise. The 64bit kernel is significantly bigger
	  and slower than the 32bit one.

choice
	prompt "Kernel page size"
	default PARISC_PAGE_SIZE_4KB  if !64BIT
	default PARISC_PAGE_SIZE_4KB  if 64BIT
#	default PARISC_PAGE_SIZE_16KB if 64BIT

config PARISC_PAGE_SIZE_4KB
	bool "4KB"
	help
	  This lets you select the page size of the kernel.  For best
	  performance, a page size of 16KB is recommended.  For best
	  compatibility with 32bit applications, a page size of 4KB should be
	  selected (the vast majority of 32bit binaries work perfectly fine
	  with a larger page size).

	  4KB                For best 32bit compatibility
	  16KB               For best performance
	  64KB               For best performance, might give more overhead.

	  If you don't know what to do, choose 4KB.

config PARISC_PAGE_SIZE_16KB
	bool "16KB (EXPERIMENTAL)"
	depends on PA8X00 && EXPERIMENTAL

config PARISC_PAGE_SIZE_64KB
	bool "64KB (EXPERIMENTAL)"
	depends on PA8X00 && EXPERIMENTAL

endchoice

config SMP
	bool "Symmetric multi-processing support"
	---help---
+3 −0
Original line number Diff line number Diff line
@@ -288,8 +288,11 @@ int main(void)
	DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
	DEFINE(ASM_PMD_ENTRY_SIZE, PMD_ENTRY_SIZE);
	DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
	DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
	DEFINE(ASM_PT_INITIAL, PT_INITIAL);
	DEFINE(ASM_PAGE_SIZE, PAGE_SIZE);
	DEFINE(ASM_PAGE_SIZE_DIV64, PAGE_SIZE/64);
	DEFINE(ASM_PAGE_SIZE_DIV128, PAGE_SIZE/128);
	BLANK();
	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
+22 −14
Original line number Diff line number Diff line
@@ -502,18 +502,20 @@
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro		L3_ptep pgd,pte,index,va,fault
#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy		%r0,\pte
	extrd,u,*=	\va,31,32,%r0
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s		\index(\pgd),\pgd
	extrd,u,*=	\va,31,32,%r0
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,31,32,%r0
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld		\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,31,32,%r0
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy		\index,\pgd
	extrd,u,*<>	\va,31,32,%r0
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm

@@ -563,10 +565,18 @@
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlbt and idtlbt */
	/* Enforce uncacheable pages.
	 * This should ONLY be use for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handles cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depi		1,12,1,\prot

	depd		%r0,63,PAGE_SHIFT,\pte
	extrd,s		\pte,(63-PAGE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
	.endm

	/* Identical macro to make_insert_tlb above, except it
@@ -584,9 +594,8 @@

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi		0,31,PAGE_SHIFT,\pte
	depi		_PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
	extru		\pte,24,25,\pte

	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
@@ -1201,10 +1210,9 @@ intr_save:
	 */

	/* adjust isr/ior. */

	extrd,u         %r16,63,7,%r1    /* get high bits from isr for ior */
	depd            %r1,31,7,%r17    /* deposit them into ior */
	depdi           0,63,7,%r16      /* clear them from isr */
	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)
+8 −7
Original line number Diff line number Diff line
@@ -76,7 +76,7 @@ $bss_loop:
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#ifdef CONFIG_64BIT
#if PT_NLEVELS == 3
	/* Set pmd in pgd */
	load32		PA(pmd0),%r5
	shrd            %r5,PxD_VALUE_SHIFT,%r3	
@@ -99,7 +99,7 @@ $bss_loop:
	stw		%r3,0(%r4)
	ldo		(ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
	addib,>		-1,%r1,1b
#ifdef CONFIG_64BIT
#if PT_NLEVELS == 3
	ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
	ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
@@ -107,13 +107,14 @@ $bss_loop:


	/* Now initialize the PTEs themselves */
	ldo		_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
	ldo		0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
	ldi		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
	load32		PA(pg0),%r1

$pgt_fill_loop:
	STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)
	ldo		ASM_PAGE_SIZE(%r3),%r3
	bb,>=		%r3,31-KERNEL_INITIAL_ORDER,$pgt_fill_loop
	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
	addib,>		-1,%r11,$pgt_fill_loop
	nop

	/* Load the return address...er...crash 'n burn */
+5 −5
Original line number Diff line number Diff line
@@ -53,17 +53,17 @@ union thread_union init_thread_union
	__attribute__((aligned(128))) __attribute__((__section__(".data.init_task"))) =
		{ INIT_THREAD_INFO(init_task) };

#ifdef __LP64__
#if PT_NLEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 * guarantee that global objects will be laid out in memory in the same order 
 * as the order of declaration, so put these in different sections and use
 * the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pmd"))) = { {0}, };

pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data.vm0.pmd"), aligned(PAGE_SIZE)));
#endif
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pgd"))) = { {0}, };
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pte")))  = { {0}, };

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data.vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data.vm0.pte"), aligned(PAGE_SIZE)));

/*
 * Initial task structure.
Loading