Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 42990701 authored by Matt Fleming, committed by Paul Mundt
Browse files

sh: Relax inline assembly constraints



When dereferencing the memory address contained in a register and
modifying the value at that memory address, the register should not be
listed in the inline asm outputs. The value at the memory address is an
output (which is taken care of with the "memory" clobber), not the register.

Signed-off-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent e4e3c3f1
Loading
Loading
Loading
Loading
+36 −36
Original line number Diff line number Diff line
@@ -13,11 +13,11 @@ static inline void set_bit(int nr, volatile void * addr)
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! set_bit		\n\t"
		"or		%3, %0				\n\t"
		"or		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=r" (a)
		: "1" (a), "r" (mask)
		: "=&z" (tmp)
		: "r" (a), "r" (mask)
		: "t", "memory"
	);
}
@@ -34,11 +34,11 @@ static inline void clear_bit(int nr, volatile void * addr)
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! clear_bit		\n\t"
		"and		%3, %0				\n\t"
		"and		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=r" (a)
		: "1" (a), "r" (~mask)
		: "=&z" (tmp)
		: "r" (a), "r" (~mask)
		: "t", "memory"
	);
}
@@ -55,11 +55,11 @@ static inline void change_bit(int nr, volatile void * addr)
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! change_bit		\n\t"
		"xor		%3, %0				\n\t"
		"xor		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=r" (a)
		: "1" (a), "r" (mask)
		: "=&z" (tmp)
		: "r" (a), "r" (mask)
		: "t", "memory"
	);
}
@@ -75,14 +75,14 @@ static inline int test_and_set_bit(int nr, volatile void * addr)

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! test_and_set_bit	\n\t"
		"mov		%0, %2				\n\t"
		"or		%4, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"movli.l	@%2, %0	! test_and_set_bit	\n\t"
		"mov		%0, %1				\n\t"
		"or		%3, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"and		%4, %2				\n\t"
		: "=&z" (tmp), "=r" (a), "=&r" (retval)
		: "1" (a), "r" (mask)
		"and		%3, %1				\n\t"
		: "=&z" (tmp), "=&r" (retval)
		: "r" (a), "r" (mask)
		: "t", "memory"
	);

@@ -100,15 +100,15 @@ static inline int test_and_clear_bit(int nr, volatile void * addr)

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! test_and_clear_bit	\n\t"
		"mov		%0, %2				\n\t"
		"and		%5, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"movli.l	@%2, %0	! test_and_clear_bit	\n\t"
		"mov		%0, %1				\n\t"
		"and		%4, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"and		%4, %2				\n\t"
		"and		%3, %1				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=r" (a), "=&r" (retval)
		: "1" (a), "r" (mask), "r" (~mask)
		: "=&z" (tmp), "=&r" (retval)
		: "r" (a), "r" (mask), "r" (~mask)
		: "t", "memory"
	);

@@ -126,15 +126,15 @@ static inline int test_and_change_bit(int nr, volatile void * addr)

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! test_and_change_bit	\n\t"
		"mov		%0, %2				\n\t"
		"xor		%4, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"movli.l	@%2, %0	! test_and_change_bit	\n\t"
		"mov		%0, %1				\n\t"
		"xor		%3, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"and		%4, %2				\n\t"
		"and		%3, %1				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=r" (a), "=&r" (retval)
		: "1" (a), "r" (mask)
		: "=&z" (tmp), "=&r" (retval)
		: "r" (a), "r" (mask)
		: "t", "memory"
	);

+19 −19
Original line number Diff line number Diff line
@@ -8,14 +8,14 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)

	__asm__ __volatile__ (
		"1:					\n\t"
		"movli.l	@%1, %0	! xchg_u32	\n\t"
		"mov		%0, %2			\n\t"
		"mov		%4, %0			\n\t"
		"movco.l	%0, @%1			\n\t"
		"movli.l	@%2, %0	! xchg_u32	\n\t"
		"mov		%0, %1			\n\t"
		"mov		%3, %0			\n\t"
		"movco.l	%0, @%2			\n\t"
		"bf		1b			\n\t"
		"synco					\n\t"
		: "=&z"(tmp), "=r" (m), "=&r" (retval)
		: "1" (m), "r" (val)
		: "=&z"(tmp), "=&r" (retval)
		: "r" (m), "r" (val)
		: "t", "memory"
	);

@@ -29,14 +29,14 @@ static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)

	__asm__ __volatile__ (
		"1:					\n\t"
		"movli.l	@%1, %0	! xchg_u8	\n\t"
		"mov		%0, %2			\n\t"
		"mov		%4, %0			\n\t"
		"movco.l	%0, @%1			\n\t"
		"movli.l	@%2, %0	! xchg_u8	\n\t"
		"mov		%0, %1			\n\t"
		"mov		%3, %0			\n\t"
		"movco.l	%0, @%2			\n\t"
		"bf		1b			\n\t"
		"synco					\n\t"
		: "=&z"(tmp), "=r" (m), "=&r" (retval)
		: "1" (m), "r" (val & 0xff)
		: "=&z"(tmp), "=&r" (retval)
		: "r" (m), "r" (val & 0xff)
		: "t", "memory"
	);

@@ -51,17 +51,17 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! __cmpxchg_u32		\n\t"
		"mov		%0, %2				\n\t"
		"cmp/eq		%2, %4				\n\t"
		"movli.l	@%2, %0	! __cmpxchg_u32		\n\t"
		"mov		%0, %1				\n\t"
		"cmp/eq		%1, %3				\n\t"
		"bf		2f				\n\t"
		"mov		%5, %0				\n\t"
		"mov		%3, %0				\n\t"
		"2:						\n\t"
		"movco.l	%0, @%1				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=r" (m), "=&r" (retval)
		: "1" (m), "r" (old), "r" (new)
		: "=&z" (tmp), "=&r" (retval)
		: "r" (m), "r" (old), "r" (new)
		: "t", "memory"
	);