Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6c52a96e authored by David S. Miller
Browse files

[SPARC64]: Revamp Spitfire error trap handling.



Current uncorrectable error handling was poor enough
that the processor could just loop taking the same
trap over and over again.  Fix things up so that we
at least get a log message and perhaps even some register
state.

In the process, much consolidation became possible,
particularly with the correctable error handler.

Prefix assembler and C function names with "spitfire"
to indicate that these are for Ultra-I/II/IIi/IIe only.

More work is needed to make these routines robust and
featureful to the level of the Ultra-III error handlers.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent bde4e4ee
Loading
Loading
Loading
Loading
+163 −103
Original line number Original line Diff line number Diff line
@@ -21,6 +21,7 @@
#include <asm/visasm.h>
#include <asm/visasm.h>
#include <asm/estate.h>
#include <asm/estate.h>
#include <asm/auxio.h>
#include <asm/auxio.h>
#include <asm/sfafsr.h>


#define curptr      g6
#define curptr      g6


@@ -690,9 +691,159 @@ netbsd_syscall:
	retl
	retl
	 nop
	 nop


	.globl		__do_data_access_exception
	/* We need to carefully read the error status, ACK
	.globl		__do_data_access_exception_tl1
	 * the errors, prevent recursive traps, and pass the
__do_data_access_exception_tl1:
	 * information on to C code for logging.
	 *
	 * We pass the AFAR in as-is, and we encode the status
	 * information as described in asm-sparc64/sfafsr.h
	 */
	.globl		__spitfire_access_error
__spitfire_access_error:
	/* Disable ESTATE error reporting so that we do not
	 * take recursive traps and RED state the processor.
	 */
	stxa		%g0, [%g0] ASI_ESTATE_ERROR_EN
	membar		#Sync

	mov		UDBE_UE, %g1
	ldxa		[%g0] ASI_AFSR, %g4	! Get AFSR

	/* __spitfire_cee_trap branches here with AFSR in %g4 and
	 * UDBE_CE in %g1.  It only clears ESTATE_ERR_CE in the
	 * ESTATE Error Enable register.
	 */
__spitfire_cee_trap_continue:
	ldxa		[%g0] ASI_AFAR, %g5	! Get AFAR

	rdpr		%tt, %g3
	and		%g3, 0x1ff, %g3		! Paranoia
	sllx		%g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
	or		%g4, %g3, %g4
	rdpr		%tl, %g3
	cmp		%g3, 1
	mov		1, %g3
	bleu		%xcc, 1f
	 sllx		%g3, SFSTAT_TL_GT_ONE_SHIFT, %g3

	or		%g4, %g3, %g4

	/* Read in the UDB error register state, clearing the
	 * sticky error bits as-needed.  We only clear them if
	 * the UE bit is set.  Likewise, __spitfire_cee_trap
	 * below will only do so if the CE bit is set.
	 *
	 * NOTE: UltraSparc-I/II have high and low UDB error
	 *       registers, corresponding to the two UDB units
	 *       present on those chips.  UltraSparc-IIi only
	 *       has a single UDB, called "SDB" in the manual.
	 *       For IIi the upper UDB register always reads
	 *       as zero so for our purposes things will just
	 *       work with the checks below.
	 */
1:	ldxa		[%g0] ASI_UDBH_ERROR_R, %g3
	and		%g3, 0x3ff, %g7		! Paranoia
	sllx		%g7, SFSTAT_UDBH_SHIFT, %g7
	or		%g4, %g7, %g4
	andcc		%g3, %g1, %g3		! UDBE_UE or UDBE_CE
	be,pn		%xcc, 1f
	 nop
	stxa		%g3, [%g0] ASI_UDB_ERROR_W
	membar		#Sync

1:	mov		0x18, %g3
	ldxa		[%g3] ASI_UDBL_ERROR_R, %g3
	and		%g3, 0x3ff, %g7		! Paranoia
	sllx		%g7, SFSTAT_UDBL_SHIFT, %g7
	or		%g4, %g7, %g4
	andcc		%g3, %g1, %g3		! UDBE_UE or UDBE_CE
	be,pn		%xcc, 1f
	 nop
	mov		0x18, %g7
	stxa		%g3, [%g7] ASI_UDB_ERROR_W
	membar		#Sync

1:	/* Ok, now that we've latched the error state,
	 * clear the sticky bits in the AFSR.
	 */
	stxa		%g4, [%g0] ASI_AFSR
	membar		#Sync

	rdpr		%tl, %g2
	cmp		%g2, 1
	rdpr		%pil, %g2
	bleu,pt		%xcc, 1f
	 wrpr		%g0, 15, %pil

	ba,pt		%xcc, etraptl1
	 rd		%pc, %g7

	ba,pt		%xcc, 2f
	 nop

1:	ba,pt		%xcc, etrap_irq
	 rd		%pc, %g7

2:	mov		%l4, %o1
	mov		%l5, %o2
	call		spitfire_access_error
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	 clr		%l6

	/* This is the trap handler entry point for ECC correctable
	 * errors.  They are corrected, but we listen for the trap
	 * so that the event can be logged.
	 *
	 * Disrupting errors are either:
	 * 1) single-bit ECC errors during UDB reads to system
	 *    memory
	 * 2) data parity errors during write-back events
	 *
	 * As far as I can make out from the manual, the CEE trap
	 * is only for correctable errors during memory read
	 * accesses by the front-end of the processor.
	 *
	 * The code below is only for trap level 1 CEE events,
	 * as it is the only situation where we can safely record
	 * and log.  For trap level >1 we just clear the CE bit
	 * in the AFSR and return.
	 *
	 * This is just like __spitfire_access_error above, but it
	 * specifically handles correctable errors.  If an
	 * uncorrectable error is indicated in the AFSR we
	 * will branch directly above to __spitfire_access_error
	 * to handle it instead.  Uncorrectable therefore takes
	 * priority over correctable, and the error logging
	 * C code will notice this case by inspecting the
	 * trap type.
	 */
	.globl		__spitfire_cee_trap
__spitfire_cee_trap:
	ldxa		[%g0] ASI_AFSR, %g4	! Get AFSR
	mov		1, %g3
	sllx		%g3, SFAFSR_UE_SHIFT, %g3
	andcc		%g4, %g3, %g0		! Check for UE
	bne,pn		%xcc, __spitfire_access_error
	 nop

	/* Ok, in this case we only have a correctable error.
	 * Indicate we only wish to capture that state in register
	 * %g1, and we only disable CE error reporting unlike UE
	 * handling which disables all errors.
	 */
	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g3
	andn		%g3, ESTATE_ERR_CE, %g3
	stxa		%g3, [%g0] ASI_ESTATE_ERROR_EN
	membar		#Sync

	/* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
	ba,pt		%xcc, __spitfire_cee_trap_continue
	 mov		UDBE_CE, %g1

	.globl		__spitfire_data_access_exception
	.globl		__spitfire_data_access_exception_tl1
__spitfire_data_access_exception_tl1:
	rdpr		%pstate, %g4
	rdpr		%pstate, %g4
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov		TLB_SFSR, %g3
	mov		TLB_SFSR, %g3
@@ -714,12 +865,12 @@ __do_data_access_exception_tl1:
109:	 or		%g7, %lo(109b), %g7
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1
	mov		%l4, %o1
	mov		%l5, %o2
	mov		%l5, %o2
	call		data_access_exception_tl1
	call		spitfire_data_access_exception_tl1
	 add		%sp, PTREGS_OFF, %o0
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	ba,pt		%xcc, rtrap
	 clr		%l6
	 clr		%l6


__do_data_access_exception:
__spitfire_data_access_exception:
	rdpr		%pstate, %g4
	rdpr		%pstate, %g4
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov		TLB_SFSR, %g3
	mov		TLB_SFSR, %g3
@@ -733,14 +884,14 @@ __do_data_access_exception:
109:	 or		%g7, %lo(109b), %g7
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1
	mov		%l4, %o1
	mov		%l5, %o2
	mov		%l5, %o2
	call		data_access_exception
	call		spitfire_data_access_exception
	 add		%sp, PTREGS_OFF, %o0
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	ba,pt		%xcc, rtrap
	 clr		%l6
	 clr		%l6


	.globl		__do_instruction_access_exception
	.globl		__spitfire_insn_access_exception
	.globl		__do_instruction_access_exception_tl1
	.globl		__spitfire_insn_access_exception_tl1
__do_instruction_access_exception_tl1:
__spitfire_insn_access_exception_tl1:
	rdpr		%pstate, %g4
	rdpr		%pstate, %g4
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov		TLB_SFSR, %g3
	mov		TLB_SFSR, %g3
@@ -753,12 +904,12 @@ __do_instruction_access_exception_tl1:
109:	 or		%g7, %lo(109b), %g7
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1
	mov		%l4, %o1
	mov		%l5, %o2
	mov		%l5, %o2
	call		instruction_access_exception_tl1
	call		spitfire_insn_access_exception_tl1
	 add		%sp, PTREGS_OFF, %o0
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	ba,pt		%xcc, rtrap
	 clr		%l6
	 clr		%l6


__do_instruction_access_exception:
__spitfire_insn_access_exception:
	rdpr		%pstate, %g4
	rdpr		%pstate, %g4
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov		TLB_SFSR, %g3
	mov		TLB_SFSR, %g3
@@ -771,102 +922,11 @@ __do_instruction_access_exception:
109:	 or		%g7, %lo(109b), %g7
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1
	mov		%l4, %o1
	mov		%l5, %o2
	mov		%l5, %o2
	call		instruction_access_exception
	call		spitfire_insn_access_exception
	 add		%sp, PTREGS_OFF, %o0
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	ba,pt		%xcc, rtrap
	 clr		%l6
	 clr		%l6


	/* This is the trap handler entry point for ECC correctable
	 * errors.  They are corrected, but we listen for the trap
	 * so that the event can be logged.
	 *
	 * Disrupting errors are either:
	 * 1) single-bit ECC errors during UDB reads to system
	 *    memory
	 * 2) data parity errors during write-back events
	 *
	 * As far as I can make out from the manual, the CEE trap
	 * is only for correctable errors during memory read
	 * accesses by the front-end of the processor.
	 *
	 * The code below is only for trap level 1 CEE events,
	 * as it is the only situation where we can safely record
	 * and log.  For trap level >1 we just clear the CE bit
	 * in the AFSR and return.
	 */

	/* Our trap handling infrastructure allows us to preserve
	 * two 64-bit values during etrap for arguments to
	 * subsequent C code.  Therefore we encode the information
	 * as follows:
	 *
	 * value 1) Full 64-bits of AFAR
	 * value 2) Low 33-bits of AFSR, then bits 33-->42
	 *          are UDBL error status and bits 43-->52
	 *          are UDBH error status
	 */
	.align	64
	.globl	cee_trap
cee_trap:
	ldxa	[%g0] ASI_AFSR, %g1		! Read AFSR
	ldxa	[%g0] ASI_AFAR, %g2		! Read AFAR
	sllx	%g1, 31, %g1			! Clear reserved bits
	srlx	%g1, 31, %g1			! in AFSR

	/* NOTE: UltraSparc-I/II have high and low UDB error
	 *       registers, corresponding to the two UDB units
	 *       present on those chips.  UltraSparc-IIi only
	 *       has a single UDB, called "SDB" in the manual.
	 *       For IIi the upper UDB register always reads
	 *       as zero so for our purposes things will just
	 *       work with the checks below.
	 */
	ldxa	[%g0] ASI_UDBL_ERROR_R, %g3	! Read UDB-Low error status
	andcc	%g3, (1 << 8), %g4		! Check CE bit
	sllx	%g3, (64 - 10), %g3		! Clear reserved bits
	srlx	%g3, (64 - 10), %g3		! in UDB-Low error status

	sllx	%g3, (33 + 0), %g3		! Shift up to encoding area
	or	%g1, %g3, %g1			! Or it in
	be,pn	%xcc, 1f			! Branch if CE bit was clear
	 nop
	stxa	%g4, [%g0] ASI_UDB_ERROR_W	! Clear CE sticky bit in UDBL
	membar	#Sync				! Synchronize ASI stores
1:	mov	0x18, %g5			! Addr of UDB-High error status
	ldxa	[%g5] ASI_UDBH_ERROR_R, %g3	! Read it

	andcc	%g3, (1 << 8), %g4		! Check CE bit
	sllx	%g3, (64 - 10), %g3		! Clear reserved bits
	srlx	%g3, (64 - 10), %g3		! in UDB-High error status
	sllx	%g3, (33 + 10), %g3		! Shift up to encoding area
	or	%g1, %g3, %g1			! Or it in
	be,pn	%xcc, 1f			! Branch if CE bit was clear
	 nop
	nop

	stxa	%g4, [%g5] ASI_UDB_ERROR_W	! Clear CE sticky bit in UDBH
	membar	#Sync				! Synchronize ASI stores
1:	mov	1, %g5				! AFSR CE bit is
	sllx	%g5, 20, %g5			! bit 20
	stxa	%g5, [%g0] ASI_AFSR		! Clear CE sticky bit in AFSR
	membar	#Sync				! Synchronize ASI stores
	sllx	%g2, (64 - 41), %g2		! Clear reserved bits
	srlx	%g2, (64 - 41), %g2		! in latched AFAR

	andn	%g2, 0x0f, %g2			! Finish resv bit clearing
	mov	%g1, %g4			! Move AFSR+UDB* into save reg
	mov	%g2, %g5			! Move AFAR into save reg
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
	mov	%l4, %o0

	mov	%l5, %o1
	call	cee_log
	 add	%sp, PTREGS_OFF, %o2
	ba,a,pt	%xcc, rtrap_irq

	/* Capture I/D/E-cache state into per-cpu error scoreboard.
	/* Capture I/D/E-cache state into per-cpu error scoreboard.
	 *
	 *
	 * %g1:		(TL>=0) ? 1 : 0
	 * %g1:		(TL>=0) ? 1 : 0
+145 −119
Original line number Original line Diff line number Diff line
@@ -33,6 +33,7 @@
#include <asm/dcu.h>
#include <asm/dcu.h>
#include <asm/estate.h>
#include <asm/estate.h>
#include <asm/chafsr.h>
#include <asm/chafsr.h>
#include <asm/sfafsr.h>
#include <asm/psrcompat.h>
#include <asm/psrcompat.h>
#include <asm/processor.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/timer.h>
@@ -143,8 +144,7 @@ void do_BUG(const char *file, int line)
}
}
#endif
#endif


void instruction_access_exception(struct pt_regs *regs,
void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
				  unsigned long sfsr, unsigned long sfar)
{
{
	siginfo_t info;
	siginfo_t info;


@@ -153,8 +153,8 @@ void instruction_access_exception(struct pt_regs *regs,
		return;
		return;


	if (regs->tstate & TSTATE_PRIV) {
	if (regs->tstate & TSTATE_PRIV) {
		printk("instruction_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
		printk("spitfire_insn_access_exception: SFSR[%016lx] "
		       sfsr, sfar);
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Iax", regs);
		die_if_kernel("Iax", regs);
	}
	}
	if (test_thread_flag(TIF_32BIT)) {
	if (test_thread_flag(TIF_32BIT)) {
@@ -169,19 +169,17 @@ void instruction_access_exception(struct pt_regs *regs,
	force_sig_info(SIGSEGV, &info, current);
	force_sig_info(SIGSEGV, &info, current);
}
}


void instruction_access_exception_tl1(struct pt_regs *regs,
void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
				      unsigned long sfsr, unsigned long sfar)
{
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;
		return;


	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	instruction_access_exception(regs, sfsr, sfar);
	spitfire_insn_access_exception(regs, sfsr, sfar);
}
}


void data_access_exception(struct pt_regs *regs,
void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
			   unsigned long sfsr, unsigned long sfar)
{
{
	siginfo_t info;
	siginfo_t info;


@@ -207,8 +205,8 @@ void data_access_exception(struct pt_regs *regs,
			return;
			return;
		}
		}
		/* Shit... */
		/* Shit... */
		printk("data_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       sfsr, sfar);
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
		die_if_kernel("Dax", regs);
	}
	}


@@ -220,15 +218,14 @@ void data_access_exception(struct pt_regs *regs,
	force_sig_info(SIGSEGV, &info, current);
	force_sig_info(SIGSEGV, &info, current);
}
}


void data_access_exception_tl1(struct pt_regs *regs,
void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
			       unsigned long sfsr, unsigned long sfar)
{
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;
		return;


	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	data_access_exception(regs, sfsr, sfar);
	spitfire_data_access_exception(regs, sfsr, sfar);
}
}


#ifdef CONFIG_PCI
#ifdef CONFIG_PCI
@@ -264,54 +261,13 @@ static void spitfire_clean_and_reenable_l1_caches(void)
			     : "memory");
			     : "memory");
}
}


void do_iae(struct pt_regs *regs)
static void spitfire_enable_estate_errors(void)
{
{
	siginfo_t info;
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"

			     "membar	#Sync"
	spitfire_clean_and_reenable_l1_caches();
			     : /* no outputs */

			     : "r" (ESTATE_ERR_ALL),
	if (notify_die(DIE_TRAP, "instruction access exception", regs,
			       "i" (ASI_ESTATE_ERROR_EN));
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_OBJERR;
	info.si_addr = (void *)0;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

void do_dae(struct pt_regs *regs)
{
	siginfo_t info;

#ifdef CONFIG_PCI
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();

		pci_poke_faulted = 1;

		/* Why the fuck did they have to change this? */
		if (tlb_type == cheetah || tlb_type == cheetah_plus)
			regs->tpc += 4;

		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif
	spitfire_clean_and_reenable_l1_caches();

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_OBJERR;
	info.si_addr = (void *)0;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}
}


static char ecc_syndrome_table[] = {
static char ecc_syndrome_table[] = {
@@ -349,65 +305,15 @@ static char ecc_syndrome_table[] = {
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
};


/* cee_trap in entry.S encodes AFSR/UDBH/UDBL error status
 * in the following format.  The AFAR is left as is, with
 * reserved bits cleared, and is a raw 40-bit physical
 * address.
 */
#define CE_STATUS_UDBH_UE		(1UL << (43 + 9))
#define CE_STATUS_UDBH_CE		(1UL << (43 + 8))
#define CE_STATUS_UDBH_ESYNDR		(0xffUL << 43)
#define CE_STATUS_UDBH_SHIFT		43
#define CE_STATUS_UDBL_UE		(1UL << (33 + 9))
#define CE_STATUS_UDBL_CE		(1UL << (33 + 8))
#define CE_STATUS_UDBL_ESYNDR		(0xffUL << 33)
#define CE_STATUS_UDBL_SHIFT		33
#define CE_STATUS_AFSR_MASK		(0x1ffffffffUL)
#define CE_STATUS_AFSR_ME		(1UL << 32)
#define CE_STATUS_AFSR_PRIV		(1UL << 31)
#define CE_STATUS_AFSR_ISAP		(1UL << 30)
#define CE_STATUS_AFSR_ETP		(1UL << 29)
#define CE_STATUS_AFSR_IVUE		(1UL << 28)
#define CE_STATUS_AFSR_TO		(1UL << 27)
#define CE_STATUS_AFSR_BERR		(1UL << 26)
#define CE_STATUS_AFSR_LDP		(1UL << 25)
#define CE_STATUS_AFSR_CP		(1UL << 24)
#define CE_STATUS_AFSR_WP		(1UL << 23)
#define CE_STATUS_AFSR_EDP		(1UL << 22)
#define CE_STATUS_AFSR_UE		(1UL << 21)
#define CE_STATUS_AFSR_CE		(1UL << 20)
#define CE_STATUS_AFSR_ETS		(0xfUL << 16)
#define CE_STATUS_AFSR_ETS_SHIFT	16
#define CE_STATUS_AFSR_PSYND		(0xffffUL << 0)
#define CE_STATUS_AFSR_PSYND_SHIFT	0

/* Layout of Ecache TAG Parity Syndrome of AFSR */
#define AFSR_ETSYNDROME_7_0		0x1UL /* E$-tag bus bits  <7:0> */
#define AFSR_ETSYNDROME_15_8		0x2UL /* E$-tag bus bits <15:8> */
#define AFSR_ETSYNDROME_21_16		0x4UL /* E$-tag bus bits <21:16> */
#define AFSR_ETSYNDROME_24_22		0x8UL /* E$-tag bus bits <24:22> */

static char *syndrome_unknown = "<Unknown>";
static char *syndrome_unknown = "<Unknown>";


asmlinkage void cee_log(unsigned long ce_status,
static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
			unsigned long afar,
			struct pt_regs *regs)
{
{
	char memmod_str[64];
	unsigned short scode;
	char *p;
	char memmod_str[64], *p;
	unsigned short scode, udb_reg;


	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	if (udbl & bit) {
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx]\n",
		scode = ecc_syndrome_table[udbl & 0xff];
	       smp_processor_id(),
	       (ce_status & CE_STATUS_AFSR_MASK),
	       afar,
	       ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL),
	       ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL));

	udb_reg = ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL);
	if (udb_reg & (1 << 8)) {
		scode = ecc_syndrome_table[udb_reg & 0xff];
		if (prom_getunumber(scode, afar,
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
			p = syndrome_unknown;
@@ -418,9 +324,8 @@ asmlinkage void cee_log(unsigned long ce_status,
		       smp_processor_id(), scode, p);
		       smp_processor_id(), scode, p);
	}
	}


	udb_reg = ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL);
	if (udbh & bit) {
	if (udb_reg & (1 << 8)) {
		scode = ecc_syndrome_table[udbh & 0xff];
		scode = ecc_syndrome_table[udb_reg & 0xff];
		if (prom_getunumber(scode, afar,
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
			p = syndrome_unknown;
@@ -430,6 +335,127 @@ asmlinkage void cee_log(unsigned long ce_status,
		       "Memory Module \"%s\"\n",
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
		       smp_processor_id(), scode, p);
	}
	}

}

static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{

	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}

static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
{
	siginfo_t info;

	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
	       "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);

	/* XXX add more human friendly logging of the error status
	 * XXX as is implemented for cheetah
	 */

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
		   0, tt, SIGTRAP);

	if (regs->tstate & TSTATE_PRIV) {
		if (tl1)
			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
		die_if_kernel("UE", regs);
	}

	/* XXX need more intelligent processing here, such as is implemented
	 * XXX for cheetah errors, in fact if the E-cache still holds the
	 * XXX line with bad parity this will loop
	 */

	spitfire_clean_and_reenable_l1_caches();
	spitfire_enable_estate_errors();

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_OBJERR;
	info.si_addr = (void *)0;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			if (udbh & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}
}


int cheetah_pcache_forced_on;
int cheetah_pcache_forced_on;
+16 −11
Original line number Original line Diff line number Diff line
@@ -18,9 +18,10 @@ sparc64_ttable_tl0:
tl0_resv000:	BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3)
tl0_resv000:	BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3)
tl0_resv004:	BTRAP(0x4)  BTRAP(0x5) BTRAP(0x6) BTRAP(0x7)
tl0_resv004:	BTRAP(0x4)  BTRAP(0x5) BTRAP(0x6) BTRAP(0x7)
tl0_iax:	membar #Sync
tl0_iax:	membar #Sync
		TRAP_NOSAVE_7INSNS(__do_instruction_access_exception)
		TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception)
tl0_resv009:	BTRAP(0x9)
tl0_resv009:	BTRAP(0x9)
tl0_iae:	TRAP(do_iae)
tl0_iae:	membar #Sync
		TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl0_resv00b:	BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
tl0_resv00b:	BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
tl0_ill:	membar #Sync
tl0_ill:	membar #Sync
		TRAP_7INSNS(do_illegal_instruction)
		TRAP_7INSNS(do_illegal_instruction)
@@ -36,9 +37,10 @@ tl0_cwin: CLEAN_WINDOW
tl0_div0:	TRAP(do_div0)
tl0_div0:	TRAP(do_div0)
tl0_resv029:	BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e)
tl0_resv029:	BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e)
tl0_resv02f:	BTRAP(0x2f)
tl0_resv02f:	BTRAP(0x2f)
tl0_dax:	TRAP_NOSAVE(__do_data_access_exception)
tl0_dax:	TRAP_NOSAVE(__spitfire_data_access_exception)
tl0_resv031:	BTRAP(0x31)
tl0_resv031:	BTRAP(0x31)
tl0_dae:	TRAP(do_dae)
tl0_dae:	membar #Sync
		TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl0_resv033:	BTRAP(0x33)
tl0_resv033:	BTRAP(0x33)
tl0_mna:	TRAP_NOSAVE(do_mna)
tl0_mna:	TRAP_NOSAVE(do_mna)
tl0_lddfmna:	TRAP_NOSAVE(do_lddfmna)
tl0_lddfmna:	TRAP_NOSAVE(do_lddfmna)
@@ -73,7 +75,8 @@ tl0_resv05c: BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f)
tl0_ivec:	TRAP_IVEC
tl0_ivec:	TRAP_IVEC
tl0_paw:	TRAP(do_paw)
tl0_paw:	TRAP(do_paw)
tl0_vaw:	TRAP(do_vaw)
tl0_vaw:	TRAP(do_vaw)
tl0_cee:	TRAP_NOSAVE(cee_trap)
tl0_cee:	membar #Sync
		TRAP_NOSAVE_7INSNS(__spitfire_cee_trap)
tl0_iamiss:
tl0_iamiss:
#include	"itlb_base.S"
#include	"itlb_base.S"
tl0_damiss:
tl0_damiss:
@@ -175,9 +178,10 @@ tl0_resv1f0: BTRAPS(0x1f0) BTRAPS(0x1f8)
sparc64_ttable_tl1:
sparc64_ttable_tl1:
tl1_resv000:	BOOT_KERNEL    BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3)
tl1_resv000:	BOOT_KERNEL    BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3)
tl1_resv004:	BTRAPTL1(0x4)  BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7)
tl1_resv004:	BTRAPTL1(0x4)  BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7)
tl1_iax:	TRAP_NOSAVE(__do_instruction_access_exception_tl1)
tl1_iax:	TRAP_NOSAVE(__spitfire_insn_access_exception_tl1)
tl1_resv009:	BTRAPTL1(0x9)
tl1_resv009:	BTRAPTL1(0x9)
tl1_iae:	TRAPTL1(do_iae_tl1)
tl1_iae:	membar #Sync
		TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl1_resv00b:	BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
tl1_resv00b:	BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
tl1_ill:	TRAPTL1(do_ill_tl1)
tl1_ill:	TRAPTL1(do_ill_tl1)
tl1_privop:	BTRAPTL1(0x11)
tl1_privop:	BTRAPTL1(0x11)
@@ -193,9 +197,10 @@ tl1_cwin: CLEAN_WINDOW
tl1_div0:	TRAPTL1(do_div0_tl1)
tl1_div0:	TRAPTL1(do_div0_tl1)
tl1_resv029:	BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c)
tl1_resv029:	BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c)
tl1_resv02d:	BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f)
tl1_resv02d:	BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f)
tl1_dax:	TRAP_NOSAVE(__do_data_access_exception_tl1)
tl1_dax:	TRAP_NOSAVE(__spitfire_data_access_exception_tl1)
tl1_resv031:	BTRAPTL1(0x31)
tl1_resv031:	BTRAPTL1(0x31)
tl1_dae:	TRAPTL1(do_dae_tl1)
tl1_dae:	membar #Sync
		TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl1_resv033:	BTRAPTL1(0x33)
tl1_resv033:	BTRAPTL1(0x33)
tl1_mna:	TRAP_NOSAVE(do_mna)
tl1_mna:	TRAP_NOSAVE(do_mna)
tl1_lddfmna:	TRAPTL1(do_lddfmna_tl1)
tl1_lddfmna:	TRAPTL1(do_lddfmna_tl1)
@@ -219,8 +224,8 @@ tl1_paw: TRAPTL1(do_paw_tl1)
tl1_vaw:	TRAPTL1(do_vaw_tl1)
tl1_vaw:	TRAPTL1(do_vaw_tl1)


		/* The grotty trick to save %g1 into current->thread.cee_stuff
		/* The grotty trick to save %g1 into current->thread.cee_stuff
		 * is because when we take this trap we could be interrupting trap
		 * is because when we take this trap we could be interrupting
		 * code already using the trap alternate global registers.
		 * trap code already using the trap alternate global registers.
		 *
		 *
		 * We cross our fingers and pray that this store/load does
		 * We cross our fingers and pray that this store/load does
		 * not cause yet another CEE trap.
		 * not cause yet another CEE trap.
+9 −9
Original line number Original line Diff line number Diff line
@@ -349,7 +349,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)


extern void do_fpother(struct pt_regs *regs);
extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void data_access_exception(struct pt_regs *regs,
extern void spitfire_data_access_exception(struct pt_regs *regs,
					   unsigned long sfsr,
					   unsigned long sfsr,
					   unsigned long sfar);
					   unsigned long sfar);


@@ -394,14 +394,14 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
				break;
				break;
			}
			}
		default:
		default:
			data_access_exception(regs, 0, addr);
			spitfire_data_access_exception(regs, 0, addr);
			return 1;
			return 1;
		}
		}
		if (put_user (first >> 32, (u32 __user *)addr) ||
		if (put_user (first >> 32, (u32 __user *)addr) ||
		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
		    	data_access_exception(regs, 0, addr);
		    	spitfire_data_access_exception(regs, 0, addr);
		    	return 1;
		    	return 1;
		}
		}
	} else {
	} else {
@@ -414,7 +414,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
			do_privact(regs);
			do_privact(regs);
			return 1;
			return 1;
		} else if (asi > ASI_SNFL) {
		} else if (asi > ASI_SNFL) {
			data_access_exception(regs, 0, addr);
			spitfire_data_access_exception(regs, 0, addr);
			return 1;
			return 1;
		}
		}
		switch (insn & 0x180000) {
		switch (insn & 0x180000) {
@@ -431,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
				err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
				err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
		}
		}
		if (err && !(asi & 0x2 /* NF */)) {
		if (err && !(asi & 0x2 /* NF */)) {
			data_access_exception(regs, 0, addr);
			spitfire_data_access_exception(regs, 0, addr);
			return 1;
			return 1;
		}
		}
		if (asi & 0x8) /* Little */ {
		if (asi & 0x8) /* Little */ {
@@ -534,7 +534,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
		*(u64 *)(f->regs + freg) = value;
		*(u64 *)(f->regs + freg) = value;
		current_thread_info()->fpsaved[0] |= flag;
		current_thread_info()->fpsaved[0] |= flag;
	} else {
	} else {
daex:		data_access_exception(regs, sfsr, sfar);
daex:		spitfire_data_access_exception(regs, sfsr, sfar);
		return;
		return;
	}
	}
	advance(regs);
	advance(regs);
@@ -578,7 +578,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
			goto daex;
			goto daex;
	} else {
	} else {
daex:		data_access_exception(regs, sfsr, sfar);
daex:		spitfire_data_access_exception(regs, sfsr, sfar);
		return;
		return;
	}
	}
	advance(regs);
	advance(regs);
+3 −3
Original line number Original line Diff line number Diff line
@@ -318,7 +318,7 @@ fill_fixup_dax:
	 nop
	 nop
	rdpr		%pstate, %l1			! Prepare to change globals.
	rdpr		%pstate, %l1			! Prepare to change globals.
	mov		%g4, %o1			! Setup args for
	mov		%g4, %o1			! Setup args for
	mov		%g5, %o2			! final call to data_access_exception.
	mov		%g5, %o2			! final call to spitfire_data_access_exception.
	andn		%l1, PSTATE_MM, %l1		! We want to be in RMO
	andn		%l1, PSTATE_MM, %l1		! We want to be in RMO


	mov		%g6, %o7			! Stash away current.
	mov		%g6, %o7			! Stash away current.
@@ -330,7 +330,7 @@ fill_fixup_dax:
	mov		TSB_REG, %g1
	mov		TSB_REG, %g1
	ldxa		[%g1] ASI_IMMU, %g5
	ldxa		[%g1] ASI_IMMU, %g5
#endif
#endif
	call		data_access_exception
	call		spitfire_data_access_exception
	 add		%sp, PTREGS_OFF, %o0
	 add		%sp, PTREGS_OFF, %o0


	b,pt		%xcc, rtrap
	b,pt		%xcc, rtrap
@@ -391,7 +391,7 @@ window_dax_from_user_common:
109:	 or		%g7, %lo(109b), %g7
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1
	mov		%l4, %o1
	mov		%l5, %o2
	mov		%l5, %o2
	call		data_access_exception
	call		spitfire_data_access_exception
	 add		%sp, PTREGS_OFF, %o0
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	ba,pt		%xcc, rtrap
	 clr		%l6
	 clr		%l6
Loading