
Commit ccea7a19 authored by Russell King

[PATCH] ARM SMP: Fix vector entry



The current vector entry system does not allow for SMP.  In
order to work around this, we need to eliminate our reliance
on the fixed save areas, which breaks the way we enable
alignment traps.  This patch changes the way we handle the
save areas such that we can have one per CPU.

Signed-off-by: Russell King <rmk@arm.linux.org.uk>
parent 49f680ea
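
In outline: instead of the fixed __temp_irq/__temp_und/__temp_abt save areas, each CPU now gets a tiny three-word save area per exception mode. A vector stub stores r0, the parent PC (lr_<exception>) and the parent CPSR (spsr_<exception>) into that area and hands its address to the SVC-mode handler in r0, which is what makes entry re-entrant and SMP-safe. The following is a minimal C sketch of that layout and handoff, assuming the struct stack/cpu_init() code added in the diff below; the helper name vector_entry and the sample values are illustrative only, not part of the patch.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define NR_CPUS 4

/* One small save area per exception mode, per CPU (mirrors the new struct stack). */
struct stack {
	uint32_t irq[3];	/* r0, lr_irq (parent PC), spsr_irq (parent CPSR) */
	uint32_t abt[3];
	uint32_t und[3];
};

static struct stack stacks[NR_CPUS];

/* Hypothetical model of a vector stub: stash r0, parent PC and parent CPSR
 * (the stmia sp, {r0, lr} / str lr, [sp, #8] sequence in vector_stub), then
 * pass the area's address on to the handler (the mov r0, sp handoff). */
static uint32_t *vector_entry(unsigned int cpu, uint32_t r0,
			      uint32_t parent_pc, uint32_t parent_cpsr)
{
	uint32_t *area = stacks[cpu].irq;	/* sp_irq points here after cpu_init() */

	area[0] = r0;
	area[1] = parent_pc;
	area[2] = parent_cpsr;
	return area;				/* handed to the SVC-mode handler in r0 */
}

int main(void)
{
	/* svc_entry/usr_entry reload the three saved words through r0. */
	uint32_t *r0 = vector_entry(0, 0xdead, 0xc0081234, 0x60000013);

	printf("r0=%#" PRIx32 " pc=%#" PRIx32 " cpsr=%#" PRIx32 "\n",
	       r0[0], r0[1], r0[2]);
	return 0;
}
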
arch/arm/kernel/entry-armv.S  (+89 −83)
@@ -53,46 +53,62 @@
 /*
  * Invalid mode handlers
  */
-	.macro	inv_entry, sym, reason
-	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
-	stmia	sp, {r0 - lr}			@ Save XXX r0 - lr
-	ldr	r4, .LC\sym
+	.macro	inv_entry, reason
+	sub	sp, sp, #S_FRAME_SIZE
+	stmib	sp, {r1 - lr}
 	mov	r1, #\reason
 	.endm

 __pabt_invalid:
-	inv_entry abt, BAD_PREFETCH
-	b	1f
+	inv_entry BAD_PREFETCH
+	b	common_invalid

 __dabt_invalid:
-	inv_entry abt, BAD_DATA
-	b	1f
+	inv_entry BAD_DATA
+	b	common_invalid

 __irq_invalid:
-	inv_entry irq, BAD_IRQ
-	b	1f
+	inv_entry BAD_IRQ
+	b	common_invalid

 __und_invalid:
-	inv_entry und, BAD_UNDEFINSTR
-
-1:	zero_fp
-	ldmia	r4, {r5 - r7}			@ Get XXX pc, cpsr, old_r0
-	add	r4, sp, #S_PC
-	stmia	r4, {r5 - r7}			@ Save XXX pc, cpsr, old_r0
+	inv_entry BAD_UNDEFINSTR
+
+	@
+	@ XXX fall through to common_invalid
+	@
+
+@
+@ common_invalid - generic code for failed exception (re-entrant version of handlers)
+@
+common_invalid:
+	zero_fp
+
+	ldmia	r0, {r4 - r6}
+	add	r0, sp, #S_PC		@ here for interlock avoidance
+	mov	r7, #-1			@  ""   ""    ""        ""
+	str	r4, [sp]		@ save preserved r0
+	stmia	r0, {r5 - r7}		@ lr_<exception>,
+					@ cpsr_<exception>, "old_r0"
+
 	mov	r0, sp
-	and	r2, r6, #31			@ int mode
+	and	r2, r6, #0x1f
 	b	bad_mode

 /*
  * SVC mode handlers
  */
-	.macro	svc_entry, sym
+	.macro	svc_entry
 	sub	sp, sp, #S_FRAME_SIZE
-	stmia	sp, {r0 - r12}			@ save r0 - r12
-	ldr	r2, .LC\sym
-	add	r0, sp, #S_FRAME_SIZE
-	ldmia	r2, {r2 - r4}			@ get pc, cpsr
-	add	r5, sp, #S_SP
+	stmib	sp, {r1 - r12}
+
+	ldmia	r0, {r1 - r3}
+	add	r5, sp, #S_SP		@ here for interlock avoidance
+	mov	r4, #-1			@  ""  ""      ""       ""
+	add	r0, sp, #S_FRAME_SIZE   @  ""  ""      ""       ""
+	str	r1, [sp]		@ save the "real" r0 copied
+					@ from the exception stack
+
 	mov	r1, lr

 	@
@@ -109,7 +125,7 @@ __und_invalid:

 	.align	5
 __dabt_svc:
-	svc_entry abt
+	svc_entry

 	@
 	@ get ready to re-enable interrupts if appropriate
@@ -156,13 +172,15 @@ __dabt_svc:

 	.align	5
 __irq_svc:
-	svc_entry irq
+	svc_entry
+
 #ifdef CONFIG_PREEMPT
 	get_thread_info tsk
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
 	add	r7, r8, #1			@ increment it
 	str	r7, [tsk, #TI_PREEMPT]
 #endif
+
 	irq_handler
 #ifdef CONFIG_PREEMPT
 	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
@@ -200,7 +218,7 @@ svc_preempt:

 	.align	5
 __und_svc:
-	svc_entry und
+	svc_entry

 	@
 	@ call emulation code, which returns using r9 if it has emulated
@@ -230,7 +248,7 @@ __und_svc:

 	.align	5
 __pabt_svc:
-	svc_entry abt
+	svc_entry

 	@
 	@ re-enable interrupts if appropriate
@@ -263,12 +281,6 @@ __pabt_svc:
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

 	.align	5
-.LCirq:
-	.word	__temp_irq
-.LCund:
-	.word	__temp_und
-.LCabt:
-	.word	__temp_abt
 .LCcralign:
 	.word	cr_alignment
 #ifdef MULTI_ABORT
@@ -285,12 +297,16 @@ __pabt_svc:
 /*
  * User mode handlers
  */
-	.macro	usr_entry, sym
-	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
-	stmia	sp, {r0 - r12}			@ save r0 - r12
-	ldr	r7, .LC\sym
-	add	r5, sp, #S_PC
-	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr
+	.macro	usr_entry
+	sub	sp, sp, #S_FRAME_SIZE
+	stmib	sp, {r1 - r12}
+
+	ldmia	r0, {r1 - r3}
+	add	r0, sp, #S_PC		@ here for interlock avoidance
+	mov	r4, #-1			@  ""  ""     ""        ""
+
+	str	r1, [sp]		@ save the "real" r0 copied
+					@ from the exception stack

 #if __LINUX_ARM_ARCH__ < 6
 	@ make sure our user space atomic helper is aborted
@@ -307,8 +323,8 @@ __pabt_svc:
 	@
 	@ Also, separately save sp_usr and lr_usr
 	@
-	stmia	r5, {r2 - r4}
-	stmdb	r5, {sp, lr}^
+	stmia	r0, {r2 - r4}
+	stmdb	r0, {sp, lr}^

 	@
 	@ Enable the alignment trap while in kernel mode
@@ -323,7 +339,7 @@ __pabt_svc:

 	.align	5
 __dabt_usr:
-	usr_entry abt
+	usr_entry

 	@
 	@ Call the processor-specific abort handler:
@@ -352,7 +368,7 @@ __dabt_usr:

 	.align	5
 __irq_usr:
-	usr_entry irq
+	usr_entry

 	get_thread_info tsk
 #ifdef CONFIG_PREEMPT
@@ -360,6 +376,7 @@ __irq_usr:
 	add	r7, r8, #1			@ increment it
 	str	r7, [tsk, #TI_PREEMPT]
 #endif
+
 	irq_handler
 #ifdef CONFIG_PREEMPT
 	ldr	r0, [tsk, #TI_PREEMPT]
@@ -367,6 +384,7 @@ __irq_usr:
 	teq	r0, r7
 	strne	r0, [r0, -r0]
 #endif
+
 	mov	why, #0
 	b	ret_to_user

@@ -374,7 +392,7 @@ __irq_usr:

 	.align	5
 __und_usr:
-	usr_entry und
+	usr_entry

 	tst	r3, #PSR_T_BIT			@ Thumb mode?
 	bne	fpundefinstr			@ ignore FP
@@ -490,7 +508,7 @@ fpundefinstr:

 	.align	5
 __pabt_usr:
-	usr_entry abt
+	usr_entry

 	enable_irq				@ Enable interrupts
 	mov	r0, r2				@ address (pc)
@@ -749,29 +767,41 @@ __kuser_helper_end:
  *
  * Common stub entry macro:
  *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+ *
+ * SP points to a minimal amount of processor-private memory, the address
+ * of which is copied into r0 for the mode specific abort handler.
  */
-	.macro	vector_stub, name, sym, correction=0
+	.macro	vector_stub, name, correction=0
 	.align	5

 vector_\name:
-	ldr	r13, .LCs\sym
 	.if \correction
 	sub	lr, lr, #\correction
 	.endif
-	str	lr, [r13]			@ save lr_IRQ
+
+	@
+	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
+	@ (parent CPSR)
+	@
+	stmia	sp, {r0, lr}		@ save r0, lr
 	mrs	lr, spsr
-	str	lr, [r13, #4]			@ save spsr_IRQ
+	str	lr, [sp, #8]		@ save spsr

 	@
-	@ now branch to the relevant MODE handling routine
+	@ Prepare for SVC32 mode.  IRQs remain disabled.
 	@
-	mrs	r13, cpsr
-	bic	r13, r13, #MODE_MASK
-	orr	r13, r13, #SVC_MODE
-	msr	spsr_cxsf, r13			@ switch to SVC_32 mode
+	mrs	r0, cpsr
+	bic	r0, r0, #MODE_MASK
+	orr	r0, r0, #SVC_MODE
+	msr	spsr_cxsf, r0

-	and	lr, lr, #15
+	@
+	@ the branch table must immediately follow this code
+	@
+	mov	r0, sp
+	and	lr, lr, #0x0f
 	ldr	lr, [pc, lr, lsl #2]
-	movs	pc, lr				@ Changes mode and branches
+	movs	pc, lr			@ branch to handler in SVC mode
 	.endm

 	.globl	__stubs_start
@@ -779,7 +809,7 @@ __stubs_start:
 /*
  * Interrupt dispatcher
  */
-	vector_stub	irq, irq, 4
+	vector_stub	irq, 4

 	.long	__irq_usr			@  0  (USR_26 / USR_32)
 	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -802,7 +832,7 @@ __stubs_start:
  * Data abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-	vector_stub	dabt, abt, 8
+	vector_stub	dabt, 8

 	.long	__dabt_usr			@  0  (USR_26 / USR_32)
 	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -825,7 +855,7 @@ __stubs_start:
  * Prefetch abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-	vector_stub	pabt, abt, 4
+	vector_stub	pabt, 4

 	.long	__pabt_usr			@  0 (USR_26 / USR_32)
 	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
@@ -848,7 +878,7 @@ __stubs_start:
  * Undef instr entry dispatcher
  * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
  */
-	vector_stub	und, und
+	vector_stub	und

 	.long	__und_usr			@  0 (USR_26 / USR_32)
 	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
@@ -902,13 +932,6 @@ vector_addrexcptn:
 .LCvswi:
 	.word	vector_swi

-.LCsirq:
-	.word	__temp_irq
-.LCsund:
-	.word	__temp_und
-.LCsabt:
-	.word	__temp_abt
-
 	.globl	__stubs_end
 __stubs_end:

@@ -930,23 +953,6 @@ __vectors_end:

 	.data

-/*
- * Do not reorder these, and do not insert extra data between...
- */
-
-__temp_irq:
-	.word	0				@ saved lr_irq
-	.word	0				@ saved spsr_irq
-	.word	-1				@ old_r0
-__temp_und:
-	.word	0				@ Saved lr_und
-	.word	0				@ Saved spsr_und
-	.word	-1				@ old_r0
-__temp_abt:
-	.word	0				@ Saved lr_abt
-	.word	0				@ Saved spsr_abt
-	.word	-1				@ old_r0
-
 	.globl	cr_alignment
 	.globl	cr_no_alignment
 cr_alignment:
arch/arm/kernel/setup.c  (+50 −2)
@@ -92,6 +92,14 @@ struct cpu_user_fns cpu_user;
 struct cpu_cache_fns cpu_cache;
 #endif

+struct stack {
+	u32 irq[3];
+	u32 abt[3];
+	u32 und[3];
+} ____cacheline_aligned;
+
+static struct stack stacks[NR_CPUS];
+
 char elf_platform[ELF_PLATFORM_SIZE];
 EXPORT_SYMBOL(elf_platform);

@@ -307,8 +315,6 @@ static void __init setup_processor(void)
 	       cpu_name, processor_id, (int)processor_id & 15,
 	       proc_arch[cpu_architecture()]);

-	dump_cpu_info(smp_processor_id());
-
 	sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
 	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
 	elf_hwcap = list->elf_hwcap;
@@ -316,6 +322,46 @@ static void __init setup_processor(void)
 	cpu_proc_init();
 }

+/*
+ * cpu_init - initialise one CPU.
+ *
+ * cpu_init dumps the cache information, initialises SMP specific
+ * information, and sets up the per-CPU stacks.
+ */
+void __init cpu_init(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct stack *stk = &stacks[cpu];
+
+	if (cpu >= NR_CPUS) {
+		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
+		BUG();
+	}
+
+	dump_cpu_info(cpu);
+
+	/*
+	 * setup stacks for re-entrant exception handlers
+	 */
+	__asm__ (
+	"msr	cpsr_c, %1\n\t"
+	"add	sp, %0, %2\n\t"
+	"msr	cpsr_c, %3\n\t"
+	"add	sp, %0, %4\n\t"
+	"msr	cpsr_c, %5\n\t"
+	"add	sp, %0, %6\n\t"
+	"msr	cpsr_c, %7"
+	    :
+	    : "r" (stk),
+	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+	      "I" (offsetof(struct stack, irq[0])),
+	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+	      "I" (offsetof(struct stack, abt[0])),
+	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+	      "I" (offsetof(struct stack, und[0])),
+	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE));
+}
+
 static struct machine_desc * __init setup_machine(unsigned int nr)
 {
 	struct machine_desc *list;
@@ -715,6 +761,8 @@ void __init setup_arch(char **cmdline_p)
 	paging_init(&meminfo, mdesc);
 	request_standard_resources(&meminfo, mdesc);

+	cpu_init();
+
 	/*
 	 * Set up various architecture-specific pointers
 	 */