Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8a583c0a authored by Andreas Schwab, committed by Michael Ellerman
Browse files

powerpc: Fix invalid use of register expressions



binutils >= 2.26 now warns about misuse of register expressions in
assembler operands that are actually literals, for example:

  arch/powerpc/kernel/entry_64.S:535: Warning: invalid register expression

In practice these are almost all uses of r0 that should just be a
literal 0.

Signed-off-by: Andreas Schwab <schwab@linux-m68k.org>
[mpe: Mention r0 is almost always the culprit, fold in purgatory change]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 21a0e8c1
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -439,7 +439,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
.machine push ;					\
.machine "power4" ;				\
       lis     scratch,0x60000000@h;		\
       dcbt    r0,scratch,0b01010;		\
       dcbt    0,scratch,0b01010;		\
.machine pop

/*
+1 −1
Original line number Diff line number Diff line
@@ -179,7 +179,7 @@ nothing_to_copy:
	sld	r3, r3, r0
	li	r0, 0
1:
	dcbf	r0,r3
	dcbf	0,r3
	addi	r3,r3,0x20
	bdnz	1b

+7 −7
Original line number Diff line number Diff line
@@ -45,13 +45,13 @@ _GLOBAL(copypage_power7)
.machine push
.machine "power4"
	/* setup read stream 0  */
	dcbt	r0,r4,0b01000  	/* addr from */
	dcbt	r0,r7,0b01010   /* length and depth from */
	dcbt	0,r4,0b01000  	/* addr from */
	dcbt	0,r7,0b01010   /* length and depth from */
	/* setup write stream 1 */
	dcbtst	r0,r9,0b01000   /* addr to */
	dcbtst	r0,r10,0b01010  /* length and depth to */
	dcbtst	0,r9,0b01000   /* addr to */
	dcbtst	0,r10,0b01010  /* length and depth to */
	eieio
	dcbt	r0,r8,0b01010	/* all streams GO */
	dcbt	0,r8,0b01010	/* all streams GO */
.machine pop

#ifdef CONFIG_ALTIVEC
@@ -83,7 +83,7 @@ _GLOBAL(copypage_power7)
	li	r12,112

	.align	5
1:	lvx	v7,r0,r4
1:	lvx	v7,0,r4
	lvx	v6,r4,r6
	lvx	v5,r4,r7
	lvx	v4,r4,r8
@@ -92,7 +92,7 @@ _GLOBAL(copypage_power7)
	lvx	v1,r4,r11
	lvx	v0,r4,r12
	addi	r4,r4,128
	stvx	v7,r0,r3
	stvx	v7,0,r3
	stvx	v6,r3,r6
	stvx	v5,r3,r7
	stvx	v4,r3,r8
+33 −33
Original line number Diff line number Diff line
@@ -315,13 +315,13 @@ err1; stb r0,0(r3)
.machine push
.machine "power4"
	/* setup read stream 0 */
	dcbt	r0,r6,0b01000   /* addr from */
	dcbt	r0,r7,0b01010   /* length and depth from */
	dcbt	0,r6,0b01000   /* addr from */
	dcbt	0,r7,0b01010   /* length and depth from */
	/* setup write stream 1 */
	dcbtst	r0,r9,0b01000   /* addr to */
	dcbtst	r0,r10,0b01010  /* length and depth to */
	dcbtst	0,r9,0b01000   /* addr to */
	dcbtst	0,r10,0b01010  /* length and depth to */
	eieio
	dcbt	r0,r8,0b01010	/* all streams GO */
	dcbt	0,r8,0b01010	/* all streams GO */
.machine pop

	beq	cr1,.Lunwind_stack_nonvmx_copy
@@ -376,26 +376,26 @@ err3; std r0,0(r3)
	li	r11,48

	bf	cr7*4+3,5f
err3;	lvx	v1,r0,r4
err3;	lvx	v1,0,r4
	addi	r4,r4,16
err3;	stvx	v1,r0,r3
err3;	stvx	v1,0,r3
	addi	r3,r3,16

5:	bf	cr7*4+2,6f
err3;	lvx	v1,r0,r4
err3;	lvx	v1,0,r4
err3;	lvx	v0,r4,r9
	addi	r4,r4,32
err3;	stvx	v1,r0,r3
err3;	stvx	v1,0,r3
err3;	stvx	v0,r3,r9
	addi	r3,r3,32

6:	bf	cr7*4+1,7f
err3;	lvx	v3,r0,r4
err3;	lvx	v3,0,r4
err3;	lvx	v2,r4,r9
err3;	lvx	v1,r4,r10
err3;	lvx	v0,r4,r11
	addi	r4,r4,64
err3;	stvx	v3,r0,r3
err3;	stvx	v3,0,r3
err3;	stvx	v2,r3,r9
err3;	stvx	v1,r3,r10
err3;	stvx	v0,r3,r11
@@ -421,7 +421,7 @@ err3; stvx v0,r3,r11
	 */
	.align	5
8:
err4;	lvx	v7,r0,r4
err4;	lvx	v7,0,r4
err4;	lvx	v6,r4,r9
err4;	lvx	v5,r4,r10
err4;	lvx	v4,r4,r11
@@ -430,7 +430,7 @@ err4; lvx v2,r4,r14
err4;	lvx	v1,r4,r15
err4;	lvx	v0,r4,r16
	addi	r4,r4,128
err4;	stvx	v7,r0,r3
err4;	stvx	v7,0,r3
err4;	stvx	v6,r3,r9
err4;	stvx	v5,r3,r10
err4;	stvx	v4,r3,r11
@@ -451,29 +451,29 @@ err4; stvx v0,r3,r16
	mtocrf	0x01,r6

	bf	cr7*4+1,9f
err3;	lvx	v3,r0,r4
err3;	lvx	v3,0,r4
err3;	lvx	v2,r4,r9
err3;	lvx	v1,r4,r10
err3;	lvx	v0,r4,r11
	addi	r4,r4,64
err3;	stvx	v3,r0,r3
err3;	stvx	v3,0,r3
err3;	stvx	v2,r3,r9
err3;	stvx	v1,r3,r10
err3;	stvx	v0,r3,r11
	addi	r3,r3,64

9:	bf	cr7*4+2,10f
err3;	lvx	v1,r0,r4
err3;	lvx	v1,0,r4
err3;	lvx	v0,r4,r9
	addi	r4,r4,32
err3;	stvx	v1,r0,r3
err3;	stvx	v1,0,r3
err3;	stvx	v0,r3,r9
	addi	r3,r3,32

10:	bf	cr7*4+3,11f
err3;	lvx	v1,r0,r4
err3;	lvx	v1,0,r4
	addi	r4,r4,16
err3;	stvx	v1,r0,r3
err3;	stvx	v1,0,r3
	addi	r3,r3,16

	/* Up to 15B to go */
@@ -553,25 +553,25 @@ err3; lvx v0,0,r4
	addi	r4,r4,16

	bf	cr7*4+3,5f
err3;	lvx	v1,r0,r4
err3;	lvx	v1,0,r4
	VPERM(v8,v0,v1,v16)
	addi	r4,r4,16
err3;	stvx	v8,r0,r3
err3;	stvx	v8,0,r3
	addi	r3,r3,16
	vor	v0,v1,v1

5:	bf	cr7*4+2,6f
err3;	lvx	v1,r0,r4
err3;	lvx	v1,0,r4
	VPERM(v8,v0,v1,v16)
err3;	lvx	v0,r4,r9
	VPERM(v9,v1,v0,v16)
	addi	r4,r4,32
err3;	stvx	v8,r0,r3
err3;	stvx	v8,0,r3
err3;	stvx	v9,r3,r9
	addi	r3,r3,32

6:	bf	cr7*4+1,7f
err3;	lvx	v3,r0,r4
err3;	lvx	v3,0,r4
	VPERM(v8,v0,v3,v16)
err3;	lvx	v2,r4,r9
	VPERM(v9,v3,v2,v16)
@@ -580,7 +580,7 @@ err3; lvx v1,r4,r10
err3;	lvx	v0,r4,r11
	VPERM(v11,v1,v0,v16)
	addi	r4,r4,64
err3;	stvx	v8,r0,r3
err3;	stvx	v8,0,r3
err3;	stvx	v9,r3,r9
err3;	stvx	v10,r3,r10
err3;	stvx	v11,r3,r11
@@ -606,7 +606,7 @@ err3; stvx v11,r3,r11
	 */
	.align	5
8:
err4;	lvx	v7,r0,r4
err4;	lvx	v7,0,r4
	VPERM(v8,v0,v7,v16)
err4;	lvx	v6,r4,r9
	VPERM(v9,v7,v6,v16)
@@ -623,7 +623,7 @@ err4; lvx v1,r4,r15
err4;	lvx	v0,r4,r16
	VPERM(v15,v1,v0,v16)
	addi	r4,r4,128
err4;	stvx	v8,r0,r3
err4;	stvx	v8,0,r3
err4;	stvx	v9,r3,r9
err4;	stvx	v10,r3,r10
err4;	stvx	v11,r3,r11
@@ -644,7 +644,7 @@ err4; stvx v15,r3,r16
	mtocrf	0x01,r6

	bf	cr7*4+1,9f
err3;	lvx	v3,r0,r4
err3;	lvx	v3,0,r4
	VPERM(v8,v0,v3,v16)
err3;	lvx	v2,r4,r9
	VPERM(v9,v3,v2,v16)
@@ -653,27 +653,27 @@ err3; lvx v1,r4,r10
err3;	lvx	v0,r4,r11
	VPERM(v11,v1,v0,v16)
	addi	r4,r4,64
err3;	stvx	v8,r0,r3
err3;	stvx	v8,0,r3
err3;	stvx	v9,r3,r9
err3;	stvx	v10,r3,r10
err3;	stvx	v11,r3,r11
	addi	r3,r3,64

9:	bf	cr7*4+2,10f
err3;	lvx	v1,r0,r4
err3;	lvx	v1,0,r4
	VPERM(v8,v0,v1,v16)
err3;	lvx	v0,r4,r9
	VPERM(v9,v1,v0,v16)
	addi	r4,r4,32
err3;	stvx	v8,r0,r3
err3;	stvx	v8,0,r3
err3;	stvx	v9,r3,r9
	addi	r3,r3,32

10:	bf	cr7*4+3,11f
err3;	lvx	v1,r0,r4
err3;	lvx	v1,0,r4
	VPERM(v8,v0,v1,v16)
	addi	r4,r4,16
err3;	stvx	v8,r0,r3
err3;	stvx	v8,0,r3
	addi	r3,r3,16

	/* Up to 15B to go */
+33 −33
Original line number Diff line number Diff line
@@ -261,12 +261,12 @@ _GLOBAL(memcpy_power7)

.machine push
.machine "power4"
	dcbt	r0,r6,0b01000
	dcbt	r0,r7,0b01010
	dcbtst	r0,r9,0b01000
	dcbtst	r0,r10,0b01010
	dcbt	0,r6,0b01000
	dcbt	0,r7,0b01010
	dcbtst	0,r9,0b01000
	dcbtst	0,r10,0b01010
	eieio
	dcbt	r0,r8,0b01010	/* GO */
	dcbt	0,r8,0b01010	/* GO */
.machine pop

	beq	cr1,.Lunwind_stack_nonvmx_copy
@@ -321,26 +321,26 @@ _GLOBAL(memcpy_power7)
	li	r11,48

	bf	cr7*4+3,5f
	lvx	v1,r0,r4
	lvx	v1,0,r4
	addi	r4,r4,16
	stvx	v1,r0,r3
	stvx	v1,0,r3
	addi	r3,r3,16

5:	bf	cr7*4+2,6f
	lvx	v1,r0,r4
	lvx	v1,0,r4
	lvx	v0,r4,r9
	addi	r4,r4,32
	stvx	v1,r0,r3
	stvx	v1,0,r3
	stvx	v0,r3,r9
	addi	r3,r3,32

6:	bf	cr7*4+1,7f
	lvx	v3,r0,r4
	lvx	v3,0,r4
	lvx	v2,r4,r9
	lvx	v1,r4,r10
	lvx	v0,r4,r11
	addi	r4,r4,64
	stvx	v3,r0,r3
	stvx	v3,0,r3
	stvx	v2,r3,r9
	stvx	v1,r3,r10
	stvx	v0,r3,r11
@@ -366,7 +366,7 @@ _GLOBAL(memcpy_power7)
	 */
	.align	5
8:
	lvx	v7,r0,r4
	lvx	v7,0,r4
	lvx	v6,r4,r9
	lvx	v5,r4,r10
	lvx	v4,r4,r11
@@ -375,7 +375,7 @@ _GLOBAL(memcpy_power7)
	lvx	v1,r4,r15
	lvx	v0,r4,r16
	addi	r4,r4,128
	stvx	v7,r0,r3
	stvx	v7,0,r3
	stvx	v6,r3,r9
	stvx	v5,r3,r10
	stvx	v4,r3,r11
@@ -396,29 +396,29 @@ _GLOBAL(memcpy_power7)
	mtocrf	0x01,r6

	bf	cr7*4+1,9f
	lvx	v3,r0,r4
	lvx	v3,0,r4
	lvx	v2,r4,r9
	lvx	v1,r4,r10
	lvx	v0,r4,r11
	addi	r4,r4,64
	stvx	v3,r0,r3
	stvx	v3,0,r3
	stvx	v2,r3,r9
	stvx	v1,r3,r10
	stvx	v0,r3,r11
	addi	r3,r3,64

9:	bf	cr7*4+2,10f
	lvx	v1,r0,r4
	lvx	v1,0,r4
	lvx	v0,r4,r9
	addi	r4,r4,32
	stvx	v1,r0,r3
	stvx	v1,0,r3
	stvx	v0,r3,r9
	addi	r3,r3,32

10:	bf	cr7*4+3,11f
	lvx	v1,r0,r4
	lvx	v1,0,r4
	addi	r4,r4,16
	stvx	v1,r0,r3
	stvx	v1,0,r3
	addi	r3,r3,16

	/* Up to 15B to go */
@@ -499,25 +499,25 @@ _GLOBAL(memcpy_power7)
	addi	r4,r4,16

	bf	cr7*4+3,5f
	lvx	v1,r0,r4
	lvx	v1,0,r4
	VPERM(v8,v0,v1,v16)
	addi	r4,r4,16
	stvx	v8,r0,r3
	stvx	v8,0,r3
	addi	r3,r3,16
	vor	v0,v1,v1

5:	bf	cr7*4+2,6f
	lvx	v1,r0,r4
	lvx	v1,0,r4
	VPERM(v8,v0,v1,v16)
	lvx	v0,r4,r9
	VPERM(v9,v1,v0,v16)
	addi	r4,r4,32
	stvx	v8,r0,r3
	stvx	v8,0,r3
	stvx	v9,r3,r9
	addi	r3,r3,32

6:	bf	cr7*4+1,7f
	lvx	v3,r0,r4
	lvx	v3,0,r4
	VPERM(v8,v0,v3,v16)
	lvx	v2,r4,r9
	VPERM(v9,v3,v2,v16)
@@ -526,7 +526,7 @@ _GLOBAL(memcpy_power7)
	lvx	v0,r4,r11
	VPERM(v11,v1,v0,v16)
	addi	r4,r4,64
	stvx	v8,r0,r3
	stvx	v8,0,r3
	stvx	v9,r3,r9
	stvx	v10,r3,r10
	stvx	v11,r3,r11
@@ -552,7 +552,7 @@ _GLOBAL(memcpy_power7)
	 */
	.align	5
8:
	lvx	v7,r0,r4
	lvx	v7,0,r4
	VPERM(v8,v0,v7,v16)
	lvx	v6,r4,r9
	VPERM(v9,v7,v6,v16)
@@ -569,7 +569,7 @@ _GLOBAL(memcpy_power7)
	lvx	v0,r4,r16
	VPERM(v15,v1,v0,v16)
	addi	r4,r4,128
	stvx	v8,r0,r3
	stvx	v8,0,r3
	stvx	v9,r3,r9
	stvx	v10,r3,r10
	stvx	v11,r3,r11
@@ -590,7 +590,7 @@ _GLOBAL(memcpy_power7)
	mtocrf	0x01,r6

	bf	cr7*4+1,9f
	lvx	v3,r0,r4
	lvx	v3,0,r4
	VPERM(v8,v0,v3,v16)
	lvx	v2,r4,r9
	VPERM(v9,v3,v2,v16)
@@ -599,27 +599,27 @@ _GLOBAL(memcpy_power7)
	lvx	v0,r4,r11
	VPERM(v11,v1,v0,v16)
	addi	r4,r4,64
	stvx	v8,r0,r3
	stvx	v8,0,r3
	stvx	v9,r3,r9
	stvx	v10,r3,r10
	stvx	v11,r3,r11
	addi	r3,r3,64

9:	bf	cr7*4+2,10f
	lvx	v1,r0,r4
	lvx	v1,0,r4
	VPERM(v8,v0,v1,v16)
	lvx	v0,r4,r9
	VPERM(v9,v1,v0,v16)
	addi	r4,r4,32
	stvx	v8,r0,r3
	stvx	v8,0,r3
	stvx	v9,r3,r9
	addi	r3,r3,32

10:	bf	cr7*4+3,11f
	lvx	v1,r0,r4
	lvx	v1,0,r4
	VPERM(v8,v0,v1,v16)
	addi	r4,r4,16
	stvx	v8,r0,r3
	stvx	v8,0,r3
	addi	r3,r3,16

	/* Up to 15B to go */
Loading