
Commit bfe3349b authored by Heiko Carstens, committed by Martin Schwidefsky

[S390] atomic ops: small cleanups



Couple of coding style fixes: replace __inline__ with inline and
remove #ifdef __KERNEL__ since the header file isn't exported.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 12751058
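For context on the __KERNEL__ removal: headers that are exported to user space traditionally wrap their kernel-only parts in #ifdef __KERNEL__, so that user-space builds see only the ABI-relevant definitions. A minimal sketch of that pattern, with hypothetical header, struct, and function names:

#ifndef _EXAMPLE_ABI_H		/* hypothetical exported header */
#define _EXAMPLE_ABI_H

/* Part of the ABI: visible to kernel and user space alike. */
struct example_abi {
	int counter;
};

#ifdef __KERNEL__
/* Kernel-only helper; user-space builds never see this. */
static inline void example_abi_init(struct example_abi *e)
{
	e->counter = 0;
}
#endif /* __KERNEL__ */

#endif /* _EXAMPLE_ABI_H */

Since this atomic.h is never exported, its __KERNEL__ guard protects nothing, which is why the patch drops it.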
+19 −22
@@ -18,8 +18,6 @@
 
 #define ATOMIC_INIT(i)  { (i) }
 
-#ifdef __KERNEL__
-
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 
 #define __CS_LOOP(ptr, op_val, op_string) ({				\
@@ -69,7 +67,7 @@ static inline void atomic_set(atomic_t *v, int i)
 	barrier();
 }
 
-static __inline__ int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t *v)
 {
 	return __CS_LOOP(v, i, "ar");
 }
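__CS_LOOP() wraps the s390 compare-and-swap (cs) instruction; the op_string argument ("ar" above, add register) selects the arithmetic applied between load and swap. In rough C terms the macro behaves like the retry loop below (cs_loop_add() is a hypothetical stand-in for illustration; the real macro emits inline assembly):

static inline int cs_loop_add(atomic_t *v, int i)
{
	int old, new;

	do {
		old = atomic_read(v);	/* sample the current value */
		new = old + i;		/* apply the operation */
		/* retry if another CPU modified v->counter in between */
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}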
@@ -79,7 +77,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 #define atomic_inc_return(_v)		atomic_add_return(1, _v)
 #define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
 
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t *v)
 {
 	return __CS_LOOP(v, i, "sr");
 }
@@ -89,19 +87,19 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
+static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
 {
 	__CS_LOOP(v, ~mask, "nr");
 }
 
-static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
+static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
 {
 	__CS_LOOP(v, mask, "or");
 }
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 	asm volatile(
@@ -119,7 +117,7 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return old;
 }
 
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
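atomic_add_unless(v, a, u) adds a to the counter unless it already equals u, and returns non-zero iff the add happened. A typical caller-side use, with a hypothetical example_tryget() for illustration, is a conditional reference grab:

static inline int example_tryget(atomic_t *refcount)
{
	/* take a reference only if the object still has one */
	return atomic_add_unless(refcount, 1, 0);
}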
@@ -191,29 +189,29 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 	barrier();
 }
 
-static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
 	return __CSG_LOOP(v, i, "agr");
 }
 
-static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 {
 	return __CSG_LOOP(v, i, "sgr");
 }
 
-static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
+static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
 {
 	__CSG_LOOP(v, ~mask, "ngr");
 }
 
-static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
+static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
 {
 	__CSG_LOOP(v, mask, "ogr");
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
+static inline long long atomic64_cmpxchg(atomic64_t *v,
 					     long long old, long long new)
 {
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
@@ -337,8 +335,7 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
 
 #endif /* CONFIG_64BIT */
 
-static __inline__ int atomic64_add_unless(atomic64_t *v,
-					  long long a, long long u)
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	long long c, old;
 	c = atomic64_read(v);
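The hunk context cuts off before the retry loop. On top of atomic64_cmpxchg() the usual add-unless idiom reads as follows; this is a sketch of the generic pattern (function name invented), not a verbatim quote of the rest of this file:

static inline int example_add_unless(atomic64_t *v, long long a, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (c == u)		/* counter hit the excluded value */
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (old == c)		/* our compare-and-swap won */
			break;
		c = old;		/* lost the race; retry with new value */
	}
	return c != u;			/* non-zero iff the add was performed */
}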
@@ -371,5 +368,5 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
 #define smp_mb__after_atomic_inc()	smp_mb()
 
 #include <asm-generic/atomic-long.h>
-#endif /* __KERNEL__ */
+
 #endif /* __ARCH_S390_ATOMIC__  */
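Taken together, a hypothetical user of this header's API (the driver-side names are invented for illustration) would look like:

static atomic_t example_users = ATOMIC_INIT(0);

static void example_open(void)
{
	atomic_inc(&example_users);
	smp_mb__after_atomic_inc();	/* expands to smp_mb() here, per the #define above */
}

static void example_release(void)
{
	if (atomic_dec_and_test(&example_users))
		;	/* last user gone; tear-down could go here */
}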