
Commit e87fc0ec authored by Peter Zijlstra, committed by Ingo Molnar

locking/atomic, arch/blackfin: Implement atomic_fetch_{add,sub,and,or,xor}()



Implement the FETCH-OP atomic primitives. These are very similar to the
existing OP-RETURN primitives we already have, except they return the
value of the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as
bitops -- because with an OP-RETURN primitive it is impossible to
reconstruct the state prior to modification.
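
The naming encodes the return-value convention: atomic_add_return(i, v) returns the counter *after* adding i, while atomic_fetch_add(i, v) returns the counter as it was *before* the addition. A minimal userspace sketch of that distinction, using the GCC/Clang __atomic builtins rather than the kernel atomic_t API:

/* Illustrative only: userspace sketch with the __atomic builtins,
 * not the kernel interface added by this patch. */
#include <stdio.h>

int main(void)
{
	int v = 5;

	/* OP-RETURN style: yields the value *after* the addition. */
	int after = __atomic_add_fetch(&v, 3, __ATOMIC_SEQ_CST);	/* after == 8 */

	/* FETCH-OP style: yields the value *before* the addition. */
	int before = __atomic_fetch_add(&v, 3, __ATOMIC_SEQ_CST);	/* before == 8, v == 11 */

	printf("after=%d before=%d final=%d\n", after, before, v);
	return 0;
}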

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Miao <realmz6@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: adi-buildroot-devel@lists.sourceforge.net
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1a6eafac
arch/blackfin/include/asm/atomic.h  +8 −0
@@ -17,6 +17,7 @@
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
 asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_xadd_asm(volatile int *ptr, int value);
 
 asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
@@ -28,10 +29,17 @@ asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 #define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
 #define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
 
+#define atomic_fetch_add(i, v) __raw_atomic_xadd_asm(&(v)->counter, i)
+#define atomic_fetch_sub(i, v) __raw_atomic_xadd_asm(&(v)->counter, -(i))
+
 #define atomic_or(i, v)  (void)__raw_atomic_or_asm(&(v)->counter, i)
 #define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
 #define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
 
+#define atomic_fetch_or(i, v)  __raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_fetch_and(i, v) __raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_fetch_xor(i, v) __raw_atomic_xor_asm(&(v)->counter, i)
+
 #endif
 
 #include <asm-generic/atomic.h>
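
In this header the new atomic_fetch_or()/atomic_fetch_and()/atomic_fetch_xor() macros simply expose the value returned by the __raw_atomic_*_asm helpers (the pre-modification value, per the assembly below), where atomic_or() and friends cast it away. An illustrative sketch, not from the patch, of why that old value matters for a bit operation: once the OR has happened, only the fetched value can still tell you whether the bit was already set. The helper name is hypothetical and plain __atomic builtins are used instead of the kernel API.

#include <stdbool.h>

static unsigned int flags;	/* shared word of flag bits */

/* Hypothetical helper: set a flag bit and report whether it was already set. */
static bool test_and_set_flag(unsigned int bit)
{
	unsigned int old = __atomic_fetch_or(&flags, 1u << bit, __ATOMIC_SEQ_CST);
	return (old & (1u << bit)) != 0;
}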
arch/blackfin/kernel/bfin_ksyms.c  +1 −0
@@ -84,6 +84,7 @@ EXPORT_SYMBOL(insl_16);
 
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_xadd_asm);
 EXPORT_SYMBOL(__raw_atomic_and_asm);
 EXPORT_SYMBOL(__raw_atomic_or_asm);
 EXPORT_SYMBOL(__raw_atomic_xor_asm);
arch/blackfin/mach-bf561/atomic.S  +31 −12
@@ -605,6 +605,28 @@ ENTRY(___raw_atomic_add_asm)
 	rts;
 ENDPROC(___raw_atomic_add_asm)
 
+/*
+ * r0 = ptr
+ * r1 = value
+ *
+ * ADD a signed value to a 32bit word and return the old value atomically.
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_atomic_xadd_asm)
+	p1 = r0;
+	r3 = r1;
+	[--sp] = rets;
+	call _get_core_lock;
+	r3 = [p1];
+	r2 = r3 + r2;
+	[p1] = r2;
+	r1 = p1;
+	call _put_core_lock;
+	r0 = r3;
+	rets = [sp++];
+	rts;
+ENDPROC(___raw_atomic_add_asm)
+
 /*
  * r0 = ptr
  * r1 = mask
@@ -618,10 +640,9 @@ ENTRY(___raw_atomic_and_asm)
 	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
-	r2 = [p1];
-	r3 = r2 & r3;
-	[p1] = r3;
-	r3 = r2;
+	r3 = [p1];
+	r2 = r2 & r3;
+	[p1] = r2;
 	r1 = p1;
 	call _put_core_lock;
 	r0 = r3;
@@ -642,10 +663,9 @@ ENTRY(___raw_atomic_or_asm)
 	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
-	r2 = [p1];
-	r3 = r2 | r3;
-	[p1] = r3;
-	r3 = r2;
+	r3 = [p1];
+	r2 = r2 | r3;
+	[p1] = r2;
 	r1 = p1;
 	call _put_core_lock;
 	r0 = r3;
@@ -666,10 +686,9 @@ ENTRY(___raw_atomic_xor_asm)
 	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
-	r2 = [p1];
-	r3 = r2 ^ r3;
-	[p1] = r3;
-	r3 = r2;
+	r3 = [p1];
+	r2 = r2 ^ r3;
+	[p1] = r2;
 	r1 = p1;
 	call _put_core_lock;
 	r0 = r3;
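
The bf561 SMP routines serialize through the core lock and hand the pre-modification value back in r0. A rough C-level sketch of the sequence the new ___raw_atomic_xadd_asm is intended to perform, per its comment block; a pthread mutex stands in for _get_core_lock/_put_core_lock, so this is illustrative rather than a drop-in equivalent of the assembly.

#include <pthread.h>

static pthread_mutex_t core_lock = PTHREAD_MUTEX_INITIALIZER;

static int raw_atomic_xadd_sketch(volatile int *ptr, int value)
{
	int old;

	pthread_mutex_lock(&core_lock);		/* stand-in for _get_core_lock */
	old = *ptr;				/* fetch the pre-modification value */
	*ptr = old + value;			/* apply the addition */
	pthread_mutex_unlock(&core_lock);	/* stand-in for _put_core_lock */

	return old;				/* FETCH-OP semantics: return the old value */
}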