Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit aac372de authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6

parents 02d31ed2 c9c10830
Loading
Loading
Loading
Loading
+5 −9
Original line number Diff line number Diff line
@@ -53,19 +53,18 @@
 * be guaranteed to be 0 ... mmu_context.h does guarantee this
 * by only using 10 bits in the hwcontext value.
 */
#define CREATE_VPTE_OFFSET1(r1, r2)
#define CREATE_VPTE_OFFSET1(r1, r2) nop
#define CREATE_VPTE_OFFSET2(r1, r2) \
				srax	r1, 10, r2
#define CREATE_VPTE_NOP		nop
#else
#define CREATE_VPTE_OFFSET1(r1, r2) \
				srax	r1, PAGE_SHIFT, r2
#define CREATE_VPTE_OFFSET2(r1, r2) \
				sllx	r2, 3, r2
#define CREATE_VPTE_NOP
#endif

/* DTLB ** ICACHE line 1: Quick user TLB misses		*/
	mov		TLB_SFSR, %g1
	ldxa		[%g1 + %g1] ASI_DMMU, %g4	! Get TAG_ACCESS
	andcc		%g4, TAG_CONTEXT_BITS, %g0	! From Nucleus?
from_tl1_trap:
@@ -74,18 +73,16 @@ from_tl1_trap:
	be,pn		%xcc, kvmap			! Yep, special processing
	 CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
	cmp		%g5, 4				! Last trap level?
	be,pn		%xcc, longpath			! Yep, cannot risk VPTE miss
	 nop						! delay slot

/* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses	*/
	be,pn		%xcc, longpath			! Yep, cannot risk VPTE miss
	 nop						! delay slot
	ldxa		[%g3 + %g6] ASI_S, %g5		! Load VPTE
1:	brgez,pn	%g5, longpath			! Invalid, branch out
	 nop						! Delay-slot
9:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry						! Trap return
	nop
	nop
	nop

/* DTLB ** ICACHE line 3: winfixups+real_faults		*/
longpath:
@@ -106,8 +103,7 @@ longpath:
	nop
	nop
	nop
	CREATE_VPTE_NOP
	nop

#undef CREATE_VPTE_OFFSET1
#undef CREATE_VPTE_OFFSET2
#undef CREATE_VPTE_NOP
+6 −6
Original line number Diff line number Diff line
@@ -14,14 +14,14 @@
 */

/* PROT ** ICACHE line 1: User DTLB protection trap	*/
	stxa		%g0, [%g1] ASI_DMMU		! Clear SFSR FaultValid bit
	membar		#Sync				! Synchronize ASI stores
	rdpr		%pstate, %g5			! Move into alternate globals
	mov		TLB_SFSR, %g1
	stxa		%g0, [%g1] ASI_DMMU		! Clear FaultValid bit
	membar		#Sync				! Synchronize stores
	rdpr		%pstate, %g5			! Move into alt-globals
	wrpr		%g5, PSTATE_AG|PSTATE_MG, %pstate
	rdpr		%tl, %g1			! Need to do a winfixup?
	rdpr		%tl, %g1			! Need a winfixup?
	cmp		%g1, 1				! Trap level >1?
	mov		TLB_TAG_ACCESS, %g4		! Prepare reload of vaddr
	nop
	mov		TLB_TAG_ACCESS, %g4		! For reload of vaddr

/* PROT ** ICACHE line 2: More real fault processing */
	bgu,pn		%xcc, winfix_trampoline		! Yes, perform winfixup
+26 −35
Original line number Diff line number Diff line
@@ -28,19 +28,14 @@
#include <asm/mmu.h>
	
/* This section from _start to sparc64_boot_end should fit into
 * 0x0000.0000.0040.4000 to 0x0000.0000.0040.8000 and will be sharing space
 * with bootup_user_stack, which is from 0x0000.0000.0040.4000 to
 * 0x0000.0000.0040.6000 and empty_bad_page, which is from
 * 0x0000.0000.0040.6000 to 0x0000.0000.0040.8000. 
 * 0x0000000000404000 to 0x0000000000408000.
 */

	.text
	.globl	start, _start, stext, _stext
_start:
start:
_stext:
stext:
bootup_user_stack:
! 0x0000000000404000
	b	sparc64_boot
	 flushw					/* Flush register file.      */
@@ -392,31 +387,30 @@ tlb_fixup_done:
	 * former does use this code, the latter does not yet due
	 * to some complexities.  That should be fixed up at some
	 * point.
	 *
	 * There used to be enormous complexity wrt. transferring
	 * over from the firmware's trap table to the Linux kernel's.
	 * For example, there was a chicken & egg problem wrt. building
	 * the OBP page tables, yet needing to be on the Linux kernel
	 * trap table (to translate PAGE_OFFSET addresses) in order to
	 * do that.
	 *
	 * We now handle OBP tlb misses differently, via linear lookups
	 * into the prom_trans[] array.  So that specific problem no
	 * longer exists.  Yet, unfortunately there are still some issues
	 * preventing trampoline.S from using this code... ho hum.
	 */
	.globl	setup_trap_table
setup_trap_table:
	save	%sp, -192, %sp

	/* Force interrupts to be disabled.  Transferring over to
	 * the Linux trap table is a very delicate operation.
	 * Until we are actually on the Linux trap table, we cannot
	 * get the PAGE_OFFSET linear mappings translated.  We need
	 * that mapping to be setup in order to initialize the firmware
	 * page tables.
	 *
	 * So there is this window of time, from the return from
	 * prom_set_trap_table() until inherit_prom_mappings_post()
	 * (in arch/sparc64/mm/init.c) completes, during which no
	 * firmware address space accesses can be made.
	 */
	/* Force interrupts to be disabled. */
	rdpr	%pstate, %o1
	andn	%o1, PSTATE_IE, %o1
	wrpr	%o1, 0x0, %pstate
	wrpr	%g0, 15, %pil

	/* Ok, now make the final valid firmware call to jump over
	 * to the Linux trap table.
	 */
	/* Make the firmware call to jump over to the Linux trap table.  */
	call	prom_set_trap_table
	 sethi	%hi(sparc64_ttable_tl0), %o0

@@ -540,15 +534,21 @@ setup_tba: /* i0 = is_starfire */

	ret
	 restore
sparc64_boot_end:

#include "systbls.S"
#include "ktlb.S"
#include "etrap.S"
#include "rtrap.S"
#include "winfixup.S"
#include "entry.S"

/*
 * The following skips make sure the trap table in ttable.S is aligned
 * The following skip makes sure the trap table in ttable.S is aligned
 * on a 32K boundary as required by the v9 specs for TBA register.
 */
sparc64_boot_end:
	.skip	0x2000 + _start - sparc64_boot_end
bootup_user_stack_end:
	.skip	0x2000
1:
	.skip	0x4000 + _start - 1b

#ifdef CONFIG_SBUS
/* This is just a hack to fool make depend config.h discovering
@@ -560,15 +560,6 @@ bootup_user_stack_end:
! 0x0000000000408000

#include "ttable.S"
#include "systbls.S"
#include "ktlb.S"
#include "etrap.S"
#include "rtrap.S"
#include "winfixup.S"
#include "entry.S"

	/* This is just anal retentiveness on my part... */
	.align	16384

	.data
	.align	8
+11 −15
Original line number Diff line number Diff line
@@ -15,14 +15,12 @@
 */
#define CREATE_VPTE_OFFSET1(r1, r2) \
				srax	r1, 10, r2
#define CREATE_VPTE_OFFSET2(r1, r2)
#define CREATE_VPTE_NOP		nop
#define CREATE_VPTE_OFFSET2(r1, r2) nop
#else /* PAGE_SHIFT */
#define CREATE_VPTE_OFFSET1(r1, r2) \
				srax	r1, PAGE_SHIFT, r2
#define CREATE_VPTE_OFFSET2(r1, r2) \
				sllx	r2, 3, r2
#define CREATE_VPTE_NOP
#endif /* PAGE_SHIFT */


@@ -36,6 +34,7 @@
 */

/* ITLB ** ICACHE line 1: Quick user TLB misses		*/
	mov		TLB_SFSR, %g1
	ldxa		[%g1 + %g1] ASI_IMMU, %g4	! Get TAG_ACCESS
	CREATE_VPTE_OFFSET1(%g4, %g6)			! Create VPTE offset
	CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
@@ -43,41 +42,38 @@
1:	brgez,pn	%g5, 3f				! Not valid, branch out
	 sethi		%hi(_PAGE_EXEC), %g4		! Delay-slot
	andcc		%g5, %g4, %g0			! Executable?

/* ITLB ** ICACHE line 2: Real faults			*/
	be,pn		%xcc, 3f			! Nope, branch.
	 nop						! Delay-slot
2:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN	! Load PTE into TLB
	retry						! Trap return
3:	rdpr		%pstate, %g4			! Move into alternate globals

/* ITLB ** ICACHE line 2: Real faults			*/
3:	rdpr		%pstate, %g4			! Move into alt-globals
	wrpr		%g4, PSTATE_AG|PSTATE_MG, %pstate
	rdpr		%tpc, %g5			! And load faulting VA
	mov		FAULT_CODE_ITLB, %g4		! It was read from ITLB
sparc64_realfault_common:				! Called by TL0 dtlb_miss too

/* ITLB ** ICACHE line 3: Finish faults	*/
sparc64_realfault_common:				! Called by dtlb_miss
	stb		%g4, [%g6 + TI_FAULT_CODE]
	stx		%g5, [%g6 + TI_FAULT_ADDR]
	ba,pt		%xcc, etrap			! Save state
1:	 rd		%pc, %g7			! ...
	nop

/* ITLB ** ICACHE line 3: Finish faults + window fixups	*/
	call		do_sparc64_fault		! Call fault handler
	 add		%sp, PTREGS_OFF, %o0! Compute pt_regs arg
	ba,pt		%xcc, rtrap_clr_l6		! Restore cpu state
	 nop

/* ITLB ** ICACHE line 4: Window fixups */
winfix_trampoline:
	rdpr		%tpc, %g3			! Prepare winfixup TNPC
	or		%g3, 0x7c, %g3			! Compute offset to branch
	or		%g3, 0x7c, %g3			! Compute branch offset
	wrpr		%g3, %tnpc			! Write it into TNPC
	done						! Do it to it

/* ITLB ** ICACHE line 4: Unused...	*/
	nop
	nop
	nop
	nop
	CREATE_VPTE_NOP

#undef CREATE_VPTE_OFFSET1
#undef CREATE_VPTE_OFFSET2
#undef CREATE_VPTE_NOP
+44 −48
Original line number Diff line number Diff line
@@ -58,9 +58,6 @@ vpte_noent:
	done

vpte_insn_obp:
	sethi		%hi(prom_pmd_phys), %g5
	ldx		[%g5 + %lo(prom_pmd_phys)], %g5

	/* Behave as if we are at TL0.  */
	wrpr		%g0, 1, %tl
	rdpr		%tpc, %g4	/* Find original faulting iaddr */
@@ -71,58 +68,57 @@ vpte_insn_obp:
	mov		TLB_SFSR, %g1
	stxa		%g4, [%g1 + %g1] ASI_IMMU

	/* Get PMD offset.  */
	srlx		%g4, 23, %g6
	and		%g6, 0x7ff, %g6
	sllx		%g6, 2, %g6

	/* Load PMD, is it valid?  */
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g5, 11, %g5

	/* Get PTE offset.  */
	srlx		%g4, 13, %g6
	and		%g6, 0x3ff, %g6
	sllx		%g6, 3, %g6

	/* Load PTE.  */
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brgez,pn	%g5, longpath
	 nop

	/* TLB load and return from trap.  */
	sethi		%hi(prom_trans), %g5
	or		%g5, %lo(prom_trans), %g5

1:	ldx		[%g5 + 0x00], %g6	! base
	brz,a,pn	%g6, longpath		! no more entries, fail
	 mov		TLB_SFSR, %g1		! and restore %g1
	ldx		[%g5 + 0x08], %g1	! len
	add		%g6, %g1, %g1		! end
	cmp		%g6, %g4
	bgu,pt		%xcc, 2f
	 cmp		%g4, %g1
	bgeu,pt		%xcc, 2f
	 ldx		[%g5 + 0x10], %g1	! PTE

	/* TLB load, restore %g1, and return from trap.  */
	sub		%g4, %g6, %g6
	add		%g1, %g6, %g5
	mov		TLB_SFSR, %g1
	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry

kvmap_do_obp:
	sethi		%hi(prom_pmd_phys), %g5
	ldx		[%g5 + %lo(prom_pmd_phys)], %g5

	/* Get PMD offset.  */
	srlx		%g4, 23, %g6
	and		%g6, 0x7ff, %g6
	sllx		%g6, 2, %g6
2:	ba,pt		%xcc, 1b
	 add		%g5, (3 * 8), %g5	! next entry

	/* Load PMD, is it valid?  */
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g5, 11, %g5

	/* Get PTE offset.  */
	srlx		%g4, 13, %g6
	and		%g6, 0x3ff, %g6
	sllx		%g6, 3, %g6

	/* Load PTE.  */
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brgez,pn	%g5, longpath
	 nop

	/* TLB load and return from trap.  */
kvmap_do_obp:
	sethi		%hi(prom_trans), %g5
	or		%g5, %lo(prom_trans), %g5
	srlx		%g4, 13, %g4
	sllx		%g4, 13, %g4

1:	ldx		[%g5 + 0x00], %g6	! base
	brz,a,pn	%g6, longpath		! no more entries, fail
	 mov		TLB_SFSR, %g1		! and restore %g1
	ldx		[%g5 + 0x08], %g1	! len
	add		%g6, %g1, %g1		! end
	cmp		%g6, %g4
	bgu,pt		%xcc, 2f
	 cmp		%g4, %g1
	bgeu,pt		%xcc, 2f
	 ldx		[%g5 + 0x10], %g1	! PTE

	/* TLB load, restore %g1, and return from trap.  */
	sub		%g4, %g6, %g6
	add		%g1, %g6, %g5
	mov		TLB_SFSR, %g1
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
	retry

2:	ba,pt		%xcc, 1b
	 add		%g5, (3 * 8), %g5	! next entry

/*
 * On a first level data miss, check whether this is to the OBP range (note
 * that such accesses can be made by prom, as well as by kernel using
Loading