Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b862f3b0 authored by Linus Torvalds
Browse files

i386: improve and correct inline asm memory constraints



Use "+m" rather than a combination of "=m" and "m" for improved clarity
and consistency.

This also fixes some inlines that incorrectly didn't tell the compiler
that they read the old value at all, potentially causing the compiler to
generate bogus code.  It appears that all of those potential bugs were
hidden by the use of extra "volatile" specifiers on the data structures
in question, though.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent e2a3d402
Loading
Loading
Loading
Loading
+14 −16
Original line number Original line Diff line number Diff line
@@ -46,8 +46,8 @@ static __inline__ void atomic_add(int i, atomic_t *v)
{
{
	__asm__ __volatile__(
	__asm__ __volatile__(
		LOCK_PREFIX "addl %1,%0"
		LOCK_PREFIX "addl %1,%0"
		:"=m" (v->counter)
		:"+m" (v->counter)
		:"ir" (i), "m" (v->counter));
		:"ir" (i));
}
}


/**
/**
@@ -61,8 +61,8 @@ static __inline__ void atomic_sub(int i, atomic_t *v)
{
{
	__asm__ __volatile__(
	__asm__ __volatile__(
		LOCK_PREFIX "subl %1,%0"
		LOCK_PREFIX "subl %1,%0"
		:"=m" (v->counter)
		:"+m" (v->counter)
		:"ir" (i), "m" (v->counter));
		:"ir" (i));
}
}


/**
/**
@@ -80,8 +80,8 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)


	__asm__ __volatile__(
	__asm__ __volatile__(
		LOCK_PREFIX "subl %2,%0; sete %1"
		LOCK_PREFIX "subl %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"+m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
		:"ir" (i) : "memory");
	return c;
	return c;
}
}


@@ -95,8 +95,7 @@ static __inline__ void atomic_inc(atomic_t *v)
{
{
	__asm__ __volatile__(
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0"
		LOCK_PREFIX "incl %0"
		:"=m" (v->counter)
		:"+m" (v->counter));
		:"m" (v->counter));
}
}


/**
/**
@@ -109,8 +108,7 @@ static __inline__ void atomic_dec(atomic_t *v)
{
{
	__asm__ __volatile__(
	__asm__ __volatile__(
		LOCK_PREFIX "decl %0"
		LOCK_PREFIX "decl %0"
		:"=m" (v->counter)
		:"+m" (v->counter));
		:"m" (v->counter));
}
}


/**
/**
@@ -127,8 +125,8 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)


	__asm__ __volatile__(
	__asm__ __volatile__(
		LOCK_PREFIX "decl %0; sete %1"
		LOCK_PREFIX "decl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"+m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
		: : "memory");
	return c != 0;
	return c != 0;
}
}


@@ -146,8 +144,8 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)


	__asm__ __volatile__(
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0; sete %1"
		LOCK_PREFIX "incl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"+m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
		: : "memory");
	return c != 0;
	return c != 0;
}
}


@@ -166,8 +164,8 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)


	__asm__ __volatile__(
	__asm__ __volatile__(
		LOCK_PREFIX "addl %2,%0; sets %1"
		LOCK_PREFIX "addl %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"+m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
		:"ir" (i) : "memory");
	return c;
	return c;
}
}


+5 −5
Original line number Original line Diff line number Diff line
@@ -20,8 +20,8 @@
	.align	8\n\
	.align	8\n\
	.long	1b,3b\n\
	.long	1b,3b\n\
	.previous"						\
	.previous"						\
	: "=r" (oldval), "=r" (ret), "=m" (*uaddr)		\
	: "=r" (oldval), "=r" (ret), "+m" (*uaddr)		\
	: "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0))
	: "i" (-EFAULT), "0" (oparg), "1" (0))


#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
  __asm__ __volatile (						\
  __asm__ __volatile (						\
@@ -38,9 +38,9 @@
	.align	8\n\
	.align	8\n\
	.long	1b,4b,2b,4b\n\
	.long	1b,4b,2b,4b\n\
	.previous"						\
	.previous"						\
	: "=&a" (oldval), "=&r" (ret), "=m" (*uaddr),		\
	: "=&a" (oldval), "=&r" (ret), "+m" (*uaddr),		\
	  "=&r" (tem)						\
	  "=&r" (tem)						\
	: "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0))
	: "r" (oparg), "i" (-EFAULT), "1" (0))


static inline int
static inline int
futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
@@ -123,7 +123,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
		"	.long   1b,3b				\n"
		"	.long   1b,3b				\n"
		"	.previous				\n"
		"	.previous				\n"


		: "=a" (oldval), "=m" (*uaddr)
		: "=a" (oldval), "+m" (*uaddr)
		: "i" (-EFAULT), "r" (newval), "0" (oldval)
		: "i" (-EFAULT), "r" (newval), "0" (oldval)
		: "memory"
		: "memory"
	);
	);
+6 −8
Original line number Original line Diff line number Diff line
@@ -17,32 +17,30 @@ static __inline__ void local_inc(local_t *v)
{
{
	__asm__ __volatile__(
	__asm__ __volatile__(
		"incl %0"
		"incl %0"
		:"=m" (v->counter)
		:"+m" (v->counter));
		:"m" (v->counter));
}
}


static __inline__ void local_dec(local_t *v)
static __inline__ void local_dec(local_t *v)
{
{
	__asm__ __volatile__(
	__asm__ __volatile__(
		"decl %0"
		"decl %0"
		:"=m" (v->counter)
		:"+m" (v->counter));
		:"m" (v->counter));
}
}


static __inline__ void local_add(long i, local_t *v)
static __inline__ void local_add(long i, local_t *v)
{
{
	__asm__ __volatile__(
	__asm__ __volatile__(
		"addl %1,%0"
		"addl %1,%0"
		:"=m" (v->counter)
		:"+m" (v->counter)
		:"ir" (i), "m" (v->counter));
		:"ir" (i));
}
}


static __inline__ void local_sub(long i, local_t *v)
static __inline__ void local_sub(long i, local_t *v)
{
{
	__asm__ __volatile__(
	__asm__ __volatile__(
		"subl %1,%0"
		"subl %1,%0"
		:"=m" (v->counter)
		:"+m" (v->counter)
		:"ir" (i), "m" (v->counter));
		:"ir" (i));
}
}


/* On x86, these are no better than the atomic variants. */
/* On x86, these are no better than the atomic variants. */
+2 −2
Original line number Original line Diff line number Diff line
@@ -51,12 +51,12 @@ typedef struct {
#undef	__FD_SET
#undef	__FD_SET
#define __FD_SET(fd,fdsetp) \
#define __FD_SET(fd,fdsetp) \
		__asm__ __volatile__("btsl %1,%0": \
		__asm__ __volatile__("btsl %1,%0": \
			"=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
			"+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))


#undef	__FD_CLR
#undef	__FD_CLR
#define __FD_CLR(fd,fdsetp) \
#define __FD_CLR(fd,fdsetp) \
		__asm__ __volatile__("btrl %1,%0": \
		__asm__ __volatile__("btrl %1,%0": \
			"=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
			"+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))


#undef	__FD_ISSET
#undef	__FD_ISSET
#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
+2 −2
Original line number Original line Diff line number Diff line
@@ -37,7 +37,7 @@
			"popl %%eax\n\t" \
			"popl %%eax\n\t" \
			"1:\n", \
			"1:\n", \
			"subl $1,%0\n\t", \
			"subl $1,%0\n\t", \
			"=m" (*(volatile int *)rw) : : "memory")
			"+m" (*(volatile int *)rw) : : "memory")


#define __build_read_lock(rw, helper)	do { \
#define __build_read_lock(rw, helper)	do { \
						if (__builtin_constant_p(rw)) \
						if (__builtin_constant_p(rw)) \
@@ -63,7 +63,7 @@
			"popl %%eax\n\t" \
			"popl %%eax\n\t" \
			"1:\n", \
			"1:\n", \
			"subl $" RW_LOCK_BIAS_STR ",%0\n\t", \
			"subl $" RW_LOCK_BIAS_STR ",%0\n\t", \
			"=m" (*(volatile int *)rw) : : "memory")
			"+m" (*(volatile int *)rw) : : "memory")


#define __build_write_lock(rw, helper)	do { \
#define __build_write_lock(rw, helper)	do { \
						if (__builtin_constant_p(rw)) \
						if (__builtin_constant_p(rw)) \
Loading