Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b2d43834 authored by David S. Miller
Browse files

sparc64: Make PAGE_OFFSET variable.



Choose PAGE_OFFSET dynamically based upon cpu type.

Original UltraSPARC-I (spitfire) chips only supported a 44-bit
virtual address space.

Newer chips (T4 and later) support 52-bit virtual addresses
and up to 47-bits of physical memory space.

Therefore we have to adjust PAGE_OFFSET dynamically based upon
the capabilities of the chip.

Note that this change alone does not allow us to support > 43-bit
physical memory, to do that we need to re-arrange our page table
support.  The current encodings of the pmd_t and pgd_t pointers
restricts us to "32 + 11" == 43 bits.

This change can waste quite a bit of memory for the various tables.
In particular, a future change should work to size and allocate
kpte_linear_bitmap[] and sparc64_valid_addr_bitmap[] dynamically.
This isn't easy as we really cannot take a TLB miss when accessing
kpte_linear_bitmap[].  We'd have to lock it into the TLB or similar.

Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Bob Picco <bob.picco@oracle.com>
parent f998c9c0
Loading
Loading
Loading
Loading
+6 −14
Original line number Diff line number Diff line
@@ -112,24 +112,16 @@ typedef pte_t *pgtable_t;

#include <asm-generic/memory_model.h>

#endif /* !(__ASSEMBLY__) */

/* We used to stick this into a hard-coded global register (%g4)
 * but that does not make sense anymore.
 */
#define MAX_SUPPORTED_PA_BITS	43
#define PAGE_OFFSET_BY_BITS(X)	(-(_AC(1,UL) << (X)))
#define PAGE_OFFSET		PAGE_OFFSET_BY_BITS(MAX_SUPPORTED_PA_BITS)
extern unsigned long PAGE_OFFSET;

/* The "virtual" portion of PAGE_OFFSET, used to clip off the non-physical
 * bits of a linear kernel address.
 */
#define PAGE_OFFSET_VA_BITS	(64 - MAX_SUPPORTED_PA_BITS)
#endif /* !(__ASSEMBLY__) */

/* The actual number of physical memory address bits we support, this is
 * used to size various tables used to manage kernel TLB misses.
/* The maximum number of physical memory address bits we support, this
 * is used to size various tables used to manage kernel TLB misses and
 * also the sparsemem code.
 */
#define MAX_PHYS_ADDRESS_BITS	41
#define MAX_PHYS_ADDRESS_BITS	47

/* These two shift counts are used when indexing sparc64_valid_addr_bitmap
 * and kpte_linear_bitmap.
+25 −5
Original line number Diff line number Diff line
@@ -153,12 +153,19 @@ kvmap_dtlb_tsb4m_miss:
	/* Clear the PAGE_OFFSET top virtual bits, shift
	 * down to get PFN, and make sure PFN is in range.
	 */
	sllx		%g4, PAGE_OFFSET_VA_BITS, %g5
661:	sllx		%g4, 0, %g5
	.section	.page_offset_shift_patch, "ax"
	.word		661b
	.previous

	/* Check to see if we know about valid memory at the 4MB
	 * chunk this physical address will reside within.
	 */
	srlx		%g5, PAGE_OFFSET_VA_BITS + MAX_PHYS_ADDRESS_BITS, %g2
661:	srlx		%g5, MAX_PHYS_ADDRESS_BITS, %g2
	.section	.page_offset_shift_patch, "ax"
	.word		661b
	.previous

	brnz,pn		%g2, kvmap_dtlb_longpath
	 nop

@@ -176,7 +183,11 @@ valid_addr_bitmap_patch:
	or		%g7, %lo(sparc64_valid_addr_bitmap), %g7
	.previous

	srlx		%g5, PAGE_OFFSET_VA_BITS + ILOG2_4MB, %g2
661:	srlx		%g5, ILOG2_4MB, %g2
	.section	.page_offset_shift_patch, "ax"
	.word		661b
	.previous

	srlx		%g2, 6, %g5
	and		%g2, 63, %g2
	sllx		%g5, 3, %g5
@@ -189,9 +200,18 @@ valid_addr_bitmap_patch:
2:	 sethi		%hi(kpte_linear_bitmap), %g2

	/* Get the 256MB physical address index. */
	sllx		%g4, PAGE_OFFSET_VA_BITS, %g5
661:	sllx		%g4, 0, %g5
	.section	.page_offset_shift_patch, "ax"
	.word		661b
	.previous

	or		%g2, %lo(kpte_linear_bitmap), %g2
	srlx		%g5, PAGE_OFFSET_VA_BITS + ILOG2_256MB, %g5

661:	srlx		%g5, ILOG2_256MB, %g5
	.section	.page_offset_shift_patch, "ax"
	.word		661b
	.previous

	and		%g5, (32 - 1), %g7

	/* Divide by 32 to get the offset into the bitmask.  */
+5 −0
Original line number Diff line number Diff line
@@ -122,6 +122,11 @@ SECTIONS
		*(.swapper_4m_tsb_phys_patch)
		__swapper_4m_tsb_phys_patch_end = .;
	}
	.page_offset_shift_patch : {
		__page_offset_shift_patch = .;
		*(.page_offset_shift_patch)
		__page_offset_shift_patch_end = .;
	}
	.popc_3insn_patch : {
		__popc_3insn_patch = .;
		*(.popc_3insn_patch)
+2 −2
Original line number Diff line number Diff line
@@ -37,10 +37,10 @@ _clear_page: /* %o0=dest */
	.globl		clear_user_page
clear_user_page:	/* %o0=dest, %o1=vaddr */
	lduw		[%g6 + TI_PRE_COUNT], %o2
	sethi		%uhi(PAGE_OFFSET), %g2
	sethi		%hi(PAGE_OFFSET), %g2
	sethi		%hi(PAGE_SIZE), %o4

	sllx		%g2, 32, %g2
	ldx		[%g2 + %lo(PAGE_OFFSET)], %g2
	sethi		%hi(PAGE_KERNEL_LOCKED), %g3

	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
+2 −2
Original line number Diff line number Diff line
@@ -46,10 +46,10 @@
	.type		copy_user_page,#function
copy_user_page:		/* %o0=dest, %o1=src, %o2=vaddr */
	lduw		[%g6 + TI_PRE_COUNT], %o4
	sethi		%uhi(PAGE_OFFSET), %g2
	sethi		%hi(PAGE_OFFSET), %g2
	sethi		%hi(PAGE_SIZE), %o3

	sllx		%g2, 32, %g2
	ldx		[%g2 + %lo(PAGE_OFFSET)], %g2
	sethi		%hi(PAGE_KERNEL_LOCKED), %g3

	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
Loading