Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a809d460 authored by Ralf Baechle
Browse files

MIPS: Fix gigaton of warning building with microMIPS.



With binutils 2.24 the attempt to switch from microMIPS mode to MIPS III
mode through .set mips3 results in *lots* of warnings like

{standard input}: Assembler messages:
{standard input}:397: Warning: the 64-bit MIPS architecture does not support the `smartmips' extension

during a kernel build.  Fixed by using .set arch=r4000 instead.

This breaks support for building the kernel with binutils 2.13, which
was supported for 32-bit kernels only anyway, and 2.14, which was a bad
vintage for MIPS anyway.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 30ee615b
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -95,7 +95,7 @@ LEAF(alchemy_sleep_au1000)

	/* cache following instructions, as memory gets put to sleep */
	la	t0, 1f
	.set	mips3
	.set	arch=r4000
	cache	0x14, 0(t0)
	cache	0x14, 32(t0)
	cache	0x14, 64(t0)
@@ -121,7 +121,7 @@ LEAF(alchemy_sleep_au1550)

	/* cache following instructions, as memory gets put to sleep */
	la	t0, 1f
	.set	mips3
	.set	arch=r4000
	cache	0x14, 0(t0)
	cache	0x14, 32(t0)
	cache	0x14, 64(t0)
@@ -163,7 +163,7 @@ LEAF(alchemy_sleep_au1300)
	la	t1, 4f
	subu	t2, t1, t0

	.set	mips3
	.set	arch=r4000

1:	cache	0x14, 0(t0)
	subu	t2, t2, 32
+2 −2
Original line number Diff line number Diff line
@@ -146,7 +146,7 @@ symbol = value

#define PREF(hint,addr)					\
		.set	push;				\
		.set	mips4;				\
		.set	arch=r5000;			\
		pref	hint, addr;			\
		.set	pop

@@ -159,7 +159,7 @@ symbol = value

#define PREFX(hint,addr)				\
		.set	push;				\
		.set	mips4;				\
		.set	arch=r5000;			\
		prefx	hint, addr;			\
		.set	pop

+20 −20
Original line number Diff line number Diff line
@@ -53,7 +53,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
		int temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	ll	%0, %1		# atomic_add		\n"
		"	addu	%0, %2					\n"
		"	sc	%0, %1					\n"
@@ -66,7 +66,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	ll	%0, %1		# atomic_add	\n"
			"	addu	%0, %2				\n"
			"	sc	%0, %1				\n"
@@ -96,7 +96,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
		int temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	ll	%0, %1		# atomic_sub		\n"
		"	subu	%0, %2					\n"
		"	sc	%0, %1					\n"
@@ -109,7 +109,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	ll	%0, %1		# atomic_sub	\n"
			"	subu	%0, %2				\n"
			"	sc	%0, %1				\n"
@@ -139,7 +139,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
		int temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	ll	%1, %2		# atomic_add_return	\n"
		"	addu	%0, %1, %3				\n"
		"	sc	%0, %2					\n"
@@ -153,7 +153,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	ll	%1, %2	# atomic_add_return	\n"
			"	addu	%0, %1, %3			\n"
			"	sc	%0, %2				\n"
@@ -188,7 +188,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
		int temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	ll	%1, %2		# atomic_sub_return	\n"
		"	subu	%0, %1, %3				\n"
		"	sc	%0, %2					\n"
@@ -205,7 +205,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	ll	%1, %2	# atomic_sub_return	\n"
			"	subu	%0, %1, %3			\n"
			"	sc	%0, %2				\n"
@@ -248,7 +248,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
		int temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
		"	subu	%0, %1, %3				\n"
		"	bltz	%0, 1f					\n"
@@ -266,7 +266,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
		int temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
		"	subu	%0, %1, %3				\n"
		"	bltz	%0, 1f					\n"
@@ -420,7 +420,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
		long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	lld	%0, %1		# atomic64_add		\n"
		"	daddu	%0, %2					\n"
		"	scd	%0, %1					\n"
@@ -433,7 +433,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	lld	%0, %1		# atomic64_add	\n"
			"	daddu	%0, %2				\n"
			"	scd	%0, %1				\n"
@@ -463,7 +463,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
		long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	lld	%0, %1		# atomic64_sub		\n"
		"	dsubu	%0, %2					\n"
		"	scd	%0, %1					\n"
@@ -476,7 +476,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	lld	%0, %1		# atomic64_sub	\n"
			"	dsubu	%0, %2				\n"
			"	scd	%0, %1				\n"
@@ -506,7 +506,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
		long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	lld	%1, %2		# atomic64_add_return	\n"
		"	daddu	%0, %1, %3				\n"
		"	scd	%0, %2					\n"
@@ -520,7 +520,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	lld	%1, %2	# atomic64_add_return	\n"
			"	daddu	%0, %1, %3			\n"
			"	scd	%0, %2				\n"
@@ -556,7 +556,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
		long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	lld	%1, %2		# atomic64_sub_return	\n"
		"	dsubu	%0, %1, %3				\n"
		"	scd	%0, %2					\n"
@@ -571,7 +571,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	lld	%1, %2	# atomic64_sub_return	\n"
			"	dsubu	%0, %1, %3			\n"
			"	scd	%0, %2				\n"
@@ -615,7 +615,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
		long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3				\n"
		"	bltz	%0, 1f					\n"
@@ -633,7 +633,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
		long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3				\n"
		"	bltz	%0, 1f					\n"
+14 −14
Original line number Diff line number Diff line
@@ -79,7 +79,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
@@ -101,7 +101,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
@@ -131,7 +131,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
@@ -153,7 +153,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
@@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
@@ -211,7 +211,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
@@ -244,7 +244,7 @@ static inline int test_and_set_bit(unsigned long nr,
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
@@ -260,7 +260,7 @@ static inline int test_and_set_bit(unsigned long nr,

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
@@ -298,7 +298,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
@@ -314,7 +314,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
@@ -353,7 +353,7 @@ static inline int test_and_clear_bit(unsigned long nr,
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
@@ -386,7 +386,7 @@ static inline int test_and_clear_bit(unsigned long nr,

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
@@ -427,7 +427,7 @@ static inline int test_and_change_bit(unsigned long nr,
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
@@ -443,7 +443,7 @@ static inline int test_and_change_bit(unsigned long nr,

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	" __LL	"%0, %1 # test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"\t%2, %1			\n"
+10 −10
Original line number Diff line number Diff line
@@ -22,11 +22,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
@@ -38,11 +38,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	ll	%0, %3		# xchg_u32	\n"
			"	.set	mips0				\n"
			"	move	%2, %z4				\n"
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	sc	%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
@@ -74,7 +74,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"	.set	arch=r4000				\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
@@ -88,7 +88,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	.set	arch=r4000			\n"
			"	lld	%0, %3		# xchg_u64	\n"
			"	move	%2, %z4				\n"
			"	scd	%2, %1				\n"
@@ -145,12 +145,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
		__asm__ __volatile__(					\
		"	.set	push				\n"	\
		"	.set	noat				\n"	\
		"	.set	mips3				\n"	\
		"	.set	arch=r4000			\n"	\
		"1:	" ld "	%0, %2		# __cmpxchg_asm \n"	\
		"	bne	%0, %z3, 2f			\n"	\
		"	.set	mips0				\n"	\
		"	move	$1, %z4				\n"	\
		"	.set	mips3				\n"	\
		"	.set	arch=r4000			\n"	\
		"	" st "	$1, %1				\n"	\
		"	beqzl	$1, 1b				\n"	\
		"2:						\n"	\
@@ -162,12 +162,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
		__asm__ __volatile__(					\
		"	.set	push				\n"	\
		"	.set	noat				\n"	\
		"	.set	mips3				\n"	\
		"	.set	arch=r4000			\n"	\
		"1:	" ld "	%0, %2		# __cmpxchg_asm \n"	\
		"	bne	%0, %z3, 2f			\n"	\
		"	.set	mips0				\n"	\
		"	move	$1, %z4				\n"	\
		"	.set	mips3				\n"	\
		"	.set	arch=r4000			\n"	\
		"	" st "	$1, %1				\n"	\
		"	beqz	$1, 1b				\n"	\
		"	.set	pop				\n"	\
Loading