Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 752a6422 authored by Ulrich Weigand, committed by Anton Blanchard
Browse files

powerpc: Fix unsafe accesses to parameter area in ELFv2



Some of the assembler files in lib/ make use of the fact that in the
ELFv1 ABI, the caller guarantees to provide stack space to save the
parameter registers r3 ... r10.  This guarantee is no longer present
in ELFv2 for functions that have no variable argument list and no
more than 8 arguments.

Change the affected routines to temporarily store registers in the
red zone and/or the top of their own stack frame (in the space
provided to save r31 ... r29, which is actually not used in these
routines).

In opal_query_takeover, simply always allocate a stack frame;
the routine is not performance critical.

Signed-off-by: Ulrich Weigand <ulrich.weigand@de.ibm.com>
Signed-off-by: Anton Blanchard <anton@samba.org>
parent b37c10d1
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -56,15 +56,15 @@ _GLOBAL(copypage_power7)

#ifdef CONFIG_ALTIVEC
	mflr	r0
	std	r3,STK_PARAM(R3)(r1)
	std	r4,STK_PARAM(R4)(r1)
	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
	std	r0,16(r1)
	stdu	r1,-STACKFRAMESIZE(r1)
	bl	enter_vmx_copy
	cmpwi	r3,0
	ld	r0,STACKFRAMESIZE+16(r1)
	ld	r3,STACKFRAMESIZE+STK_PARAM(R3)(r1)
	ld	r4,STACKFRAMESIZE+STK_PARAM(R4)(r1)
	ld	r3,STK_REG(R31)(r1)
	ld	r4,STK_REG(R30)(r1)
	mtlr	r0

	li	r0,(PAGE_SIZE/128)
+12 −12
Original line number Diff line number Diff line
@@ -85,9 +85,9 @@
.Lexit:
	addi	r1,r1,STACKFRAMESIZE
.Ldo_err1:
	ld	r3,STK_PARAM(R3)(r1)
	ld	r4,STK_PARAM(R4)(r1)
	ld	r5,STK_PARAM(R5)(r1)
	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
	ld	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
	ld	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
	b	__copy_tofrom_user_base


@@ -96,18 +96,18 @@ _GLOBAL(__copy_tofrom_user_power7)
	cmpldi	r5,16
	cmpldi	cr1,r5,4096

	std	r3,STK_PARAM(R3)(r1)
	std	r4,STK_PARAM(R4)(r1)
	std	r5,STK_PARAM(R5)(r1)
	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)

	blt	.Lshort_copy
	bgt	cr1,.Lvmx_copy
#else
	cmpldi	r5,16

	std	r3,STK_PARAM(R3)(r1)
	std	r4,STK_PARAM(R4)(r1)
	std	r5,STK_PARAM(R5)(r1)
	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)

	blt	.Lshort_copy
#endif
@@ -298,9 +298,9 @@ err1; stb r0,0(r3)
	bl	enter_vmx_usercopy
	cmpwi	cr1,r3,0
	ld	r0,STACKFRAMESIZE+16(r1)
	ld	r3,STACKFRAMESIZE+STK_PARAM(R3)(r1)
	ld	r4,STACKFRAMESIZE+STK_PARAM(R4)(r1)
	ld	r5,STACKFRAMESIZE+STK_PARAM(R5)(r1)
	ld	r3,STK_REG(R31)(r1)
	ld	r4,STK_REG(R30)(r1)
	ld	r5,STK_REG(R29)(r1)
	mtlr	r0

	/*
+4 −4
Original line number Diff line number Diff line
@@ -12,7 +12,7 @@
	.align	7
_GLOBAL(memcpy)
BEGIN_FTR_SECTION
	std	r3,STK_PARAM(R3)(r1)	/* save destination pointer for return value */
	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* save destination pointer for return value */
FTR_SECTION_ELSE
#ifndef SELFTEST
	b	memcpy_power7
@@ -73,7 +73,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
2:	bf	cr7*4+3,3f
	lbz	r9,8(r4)
	stb	r9,0(r3)
3:	ld	r3,STK_PARAM(R3)(r1)	/* return dest pointer */
3:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* return dest pointer */
	blr

.Lsrc_unaligned:
@@ -156,7 +156,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
2:	bf	cr7*4+3,3f
	rotldi	r9,r9,8
	stb	r9,0(r3)
3:	ld	r3,STK_PARAM(R3)(r1)	/* return dest pointer */
3:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* return dest pointer */
	blr

.Ldst_unaligned:
@@ -201,5 +201,5 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
3:	bf	cr7*4+3,4f
	lbz	r0,0(r4)
	stb	r0,0(r3)
4:	ld	r3,STK_PARAM(R3)(r1)	/* return dest pointer */
4:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* return dest pointer */
	blr
+10 −10
Original line number Diff line number Diff line
@@ -33,14 +33,14 @@ _GLOBAL(memcpy_power7)
	cmpldi	r5,16
	cmpldi	cr1,r5,4096

	std	r3,STK_PARAM(R1)(r1)
	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)

	blt	.Lshort_copy
	bgt	cr1,.Lvmx_copy
#else
	cmpldi	r5,16

	std	r3,STK_PARAM(R1)(r1)
	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)

	blt	.Lshort_copy
#endif
@@ -216,7 +216,7 @@ _GLOBAL(memcpy_power7)
	lbz	r0,0(r4)
	stb	r0,0(r3)

15:	ld	r3,STK_PARAM(R3)(r1)
15:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
	blr

.Lunwind_stack_nonvmx_copy:
@@ -226,16 +226,16 @@ _GLOBAL(memcpy_power7)
#ifdef CONFIG_ALTIVEC
.Lvmx_copy:
	mflr	r0
	std	r4,STK_PARAM(R4)(r1)
	std	r5,STK_PARAM(R5)(r1)
	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
	std	r0,16(r1)
	stdu	r1,-STACKFRAMESIZE(r1)
	bl	enter_vmx_copy
	cmpwi	cr1,r3,0
	ld	r0,STACKFRAMESIZE+16(r1)
	ld	r3,STACKFRAMESIZE+STK_PARAM(R3)(r1)
	ld	r4,STACKFRAMESIZE+STK_PARAM(R4)(r1)
	ld	r5,STACKFRAMESIZE+STK_PARAM(R5)(r1)
	ld	r3,STK_REG(R31)(r1)
	ld	r4,STK_REG(R30)(r1)
	ld	r5,STK_REG(R29)(r1)
	mtlr	r0

	/*
@@ -447,7 +447,7 @@ _GLOBAL(memcpy_power7)
	stb	r0,0(r3)

15:	addi	r1,r1,STACKFRAMESIZE
	ld	r3,STK_PARAM(R3)(r1)
	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
	b	exit_vmx_copy		/* tail call optimise */

.Lvmx_unaligned_copy:
@@ -651,6 +651,6 @@ _GLOBAL(memcpy_power7)
	stb	r0,0(r3)

15:	addi	r1,r1,STACKFRAMESIZE
	ld	r3,STK_PARAM(R3)(r1)
	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
	b	exit_vmx_copy		/* tail call optimise */
#endif /* CONFiG_ALTIVEC */
+2 −0
Original line number Diff line number Diff line
@@ -21,11 +21,13 @@
_GLOBAL(opal_query_takeover)
	mfcr	r0
	stw	r0,8(r1)
	stdu	r1,-STACKFRAMESIZE(r1)
	std	r3,STK_PARAM(R3)(r1)
	std	r4,STK_PARAM(R4)(r1)
	li	r3,H_HAL_TAKEOVER
	li	r4,H_HAL_TAKEOVER_QUERY_MAGIC
	HVSC
	addi	r1,r1,STACKFRAMESIZE
	ld	r10,STK_PARAM(R3)(r1)
	std	r4,0(r10)
	ld	r10,STK_PARAM(R4)(r1)