
Commit 299f6ce4 authored by Stephen Rothwell

ppc64: use mem_64.S from powerpc/lib

and remove the same bits from ppc64/lib/string.S.

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
parent a4a264f1
powerpc/lib/Makefile +1 −2
@@ -4,13 +4,12 @@

 ifeq ($(CONFIG_PPC_MERGE),y)
 obj-y			:= string.o
-obj-$(CONFIG_PPC64)	+= mem_64.o
 endif
 
 obj-y			+= strcase.o
 obj-$(CONFIG_PPC32)	+= div64.o copy_32.o checksum_32.o
 obj-$(CONFIG_PPC64)	+= checksum_64.o copypage_64.o copyuser_64.o \
-			   memcpy_64.o usercopy_64.o
+			   memcpy_64.o usercopy_64.o mem_64.o
 obj-$(CONFIG_PPC_ISERIES) += e2a.o
 ifeq ($(CONFIG_PPC64),y)
 obj-$(CONFIG_SMP)	+= locks.o
ppc64/lib/string.S +0 −106
@@ -65,112 +65,6 @@ _GLOBAL(strlen)
 	subf	r3,r3,r4
 	blr
 
-_GLOBAL(memset)
-	neg	r0,r3
-	rlwimi	r4,r4,8,16,23
-	andi.	r0,r0,7			/* # bytes to be 8-byte aligned */
-	rlwimi	r4,r4,16,0,15
-	cmplw	cr1,r5,r0		/* do we get that far? */
-	rldimi	r4,r4,32,0
-	mtcrf	1,r0
-	mr	r6,r3
-	blt	cr1,8f
-	beq+	3f			/* if already 8-byte aligned */
-	subf	r5,r0,r5
-	bf	31,1f
-	stb	r4,0(r6)
-	addi	r6,r6,1
-1:	bf	30,2f
-	sth	r4,0(r6)
-	addi	r6,r6,2
-2:	bf	29,3f
-	stw	r4,0(r6)
-	addi	r6,r6,4
-3:	srdi.	r0,r5,6
-	clrldi	r5,r5,58
-	mtctr	r0
-	beq	5f
-4:	std	r4,0(r6)
-	std	r4,8(r6)
-	std	r4,16(r6)
-	std	r4,24(r6)
-	std	r4,32(r6)
-	std	r4,40(r6)
-	std	r4,48(r6)
-	std	r4,56(r6)
-	addi	r6,r6,64
-	bdnz	4b
-5:	srwi.	r0,r5,3
-	clrlwi	r5,r5,29
-	mtcrf	1,r0
-	beq	8f
-	bf	29,6f
-	std	r4,0(r6)
-	std	r4,8(r6)
-	std	r4,16(r6)
-	std	r4,24(r6)
-	addi	r6,r6,32
-6:	bf	30,7f
-	std	r4,0(r6)
-	std	r4,8(r6)
-	addi	r6,r6,16
-7:	bf	31,8f
-	std	r4,0(r6)
-	addi	r6,r6,8
-8:	cmpwi	r5,0
-	mtcrf	1,r5
-	beqlr+
-	bf	29,9f
-	stw	r4,0(r6)
-	addi	r6,r6,4
-9:	bf	30,10f
-	sth	r4,0(r6)
-	addi	r6,r6,2
-10:	bflr	31
-	stb	r4,0(r6)
-	blr
-
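For reference, a rough C equivalent of the memset removed above (a sketch under the obvious reading of the asm, not kernel code; memset64_sketch is an invented name). The rlwimi/rldimi sequence replicates the fill byte across all eight bytes of r4; byte/half/word stores then align the destination to 8 bytes, a counted loop writes 64-byte blocks of doubleword stores, and the same cascade runs in reverse for the tail:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical C sketch of the removed 64-bit memset. */
void *memset64_sketch(void *dst, int c, size_t n)
{
	unsigned char *p = dst;
	uint64_t v = (unsigned char)c;

	/* Replicate the byte into all 8 lanes (the rlwimi/rldimi pair). */
	v |= v << 8;
	v |= v << 16;
	v |= v << 32;

	/* Head: byte stores until p is 8-byte aligned (stb/sth/stw). */
	while (n && ((uintptr_t)p & 7)) {
		*p++ = (unsigned char)c;
		n--;
	}

	/* Body: 64-byte blocks of eight doubleword stores (the bdnz loop). */
	while (n >= 64) {
		uint64_t *q = (uint64_t *)p;
		q[0] = v; q[1] = v; q[2] = v; q[3] = v;
		q[4] = v; q[5] = v; q[6] = v; q[7] = v;
		p += 64;
		n -= 64;
	}

	/* Tail: leftover doublewords, then leftover bytes. */
	while (n >= 8) {
		*(uint64_t *)p = v;
		p += 8;
		n -= 8;
	}
	while (n--)
		*p++ = (unsigned char)c;

	return dst;
}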
-_GLOBAL(memmove)
-	cmplw	0,r3,r4
-	bgt	.backwards_memcpy
-	b	.memcpy
-
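The removed memmove is only a dispatcher: when the destination starts above the source, a forward copy could overwrite source bytes before they are read, so it branches to backwards_memcpy; otherwise it tail-calls the plain memcpy. A hedged C rendering (illustrative names, not kernel entry points):

#include <string.h>

void *backwards_memcpy_sketch(void *dst, const void *src, size_t n); /* sketched below */

/* Mirrors "cmplw 0,r3,r4; bgt .backwards_memcpy; b .memcpy". */
void *memmove_sketch(void *dst, const void *src, size_t n)
{
	if (dst > src)
		return backwards_memcpy_sketch(dst, src, n);
	return memcpy(dst, src, n);
}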
-_GLOBAL(backwards_memcpy)
-	rlwinm.	r7,r5,32-3,3,31		/* r7 = r5 >> 3 */
-	add	r6,r3,r5
-	add	r4,r4,r5
-	beq	2f
-	andi.	r0,r6,3
-	mtctr	r7
-	bne	5f
-1:	lwz	r7,-4(r4)
-	lwzu	r8,-8(r4)
-	stw	r7,-4(r6)
-	stwu	r8,-8(r6)
-	bdnz	1b
-	andi.	r5,r5,7
-2:	cmplwi	0,r5,4
-	blt	3f
-	lwzu	r0,-4(r4)
-	subi	r5,r5,4
-	stwu	r0,-4(r6)
-3:	cmpwi	0,r5,0
-	beqlr
-	mtctr	r5
-4:	lbzu	r0,-1(r4)
-	stbu	r0,-1(r6)
-	bdnz	4b
-	blr
-5:	mtctr	r0
-6:	lbzu	r7,-1(r4)
-	stbu	r7,-1(r6)
-	bdnz	6b
-	subf	r5,r0,r5
-	rlwinm.	r7,r5,32-3,3,31
-	beq	2b
-	mtctr	r7
-	b	1b
-
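And a C sketch of the backwards_memcpy above (hypothetical names, mirroring the asm's structure): bytewise copies bring the end of the destination down to a word boundary (labels 5/6), the main loop moves eight bytes per iteration as two word copies (label 1), then one leftover word (label 2) and the final bytes (labels 3/4):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical C sketch of the removed backwards_memcpy. Like the
 * asm, the word copies may read src at unaligned addresses, which
 * PPC tolerates; strictly portable C would copy those bytewise. */
void *backwards_memcpy_sketch(void *dst, const void *src, size_t n)
{
	unsigned char *d = (unsigned char *)dst + n;	/* r6 = r3 + r5 */
	const unsigned char *s = (const unsigned char *)src + n;

	/* Bytewise until the destination end is word aligned (5f/6f). */
	while (n && ((uintptr_t)d & 3)) {
		*--d = *--s;
		n--;
	}

	/* Main loop: two words, i.e. 8 bytes, per iteration (1b). */
	while (n >= 8) {
		d -= 8;
		s -= 8;
		*(uint32_t *)(d + 4) = *(const uint32_t *)(s + 4);
		*(uint32_t *)d = *(const uint32_t *)s;
		n -= 8;
	}

	/* One leftover word (2f), then the final bytes (3f/4b). */
	if (n >= 4) {
		d -= 4;
		s -= 4;
		*(uint32_t *)d = *(const uint32_t *)s;
		n -= 4;
	}
	while (n--)
		*--d = *--s;

	return dst;
}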
 _GLOBAL(memcmp)
 	cmpwi	0,r5,0
 	ble-	2f