Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0199c4e6 authored by Thomas Gleixner
Browse files

locking: Convert __raw_spin* functions to arch_spin*



Name space cleanup. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
parent edc35bd7
Loading
Loading
Loading
Loading
+9 −9
Original line number Diff line number Diff line
@@ -12,18 +12,18 @@
 * We make no fairness assumptions. They have a cost.
 */

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_is_locked(x)	((x)->lock != 0)
#define __raw_spin_unlock_wait(x) \
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_is_locked(x)	((x)->lock != 0)
#define arch_spin_unlock_wait(x) \
		do { cpu_relax(); } while ((x)->lock)

static inline void __raw_spin_unlock(arch_spinlock_t * lock)
static inline void arch_spin_unlock(arch_spinlock_t * lock)
{
	mb();
	lock->lock = 0;
}

static inline void __raw_spin_lock(arch_spinlock_t * lock)
static inline void arch_spin_lock(arch_spinlock_t * lock)
{
	long tmp;

@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(arch_spinlock_t * lock)
	: "m"(lock->lock) : "memory");
}

static inline int __raw_spin_trylock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return !test_and_set_bit(0, &lock->lock);
}
@@ -169,8 +169,8 @@ static inline void __raw_write_unlock(raw_rwlock_t * lock)
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ALPHA_SPINLOCK_H */
+10 −10
Original line number Diff line number Diff line
@@ -17,13 +17,13 @@
 * Locked value: 1
 */

#define __raw_spin_is_locked(x)		((x)->lock != 0)
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
#define arch_spin_is_locked(x)		((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void __raw_spin_lock(arch_spinlock_t *lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
	smp_mb();
}

static inline int __raw_spin_trylock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock)
	}
}

static inline void __raw_spin_unlock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

@@ -220,8 +220,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */
+10 −10
Original line number Diff line number Diff line
@@ -24,31 +24,31 @@ asmlinkage void __raw_write_lock_asm(volatile int *ptr);
asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
asmlinkage void __raw_write_unlock_asm(volatile int *ptr);

static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __raw_spin_is_locked_asm(&lock->lock);
}

static inline void __raw_spin_lock(arch_spinlock_t *lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__raw_spin_lock_asm(&lock->lock);
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline int __raw_spin_trylock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __raw_spin_trylock_asm(&lock->lock);
}

static inline void __raw_spin_unlock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__raw_spin_unlock_asm(&lock->lock);
}

static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

@@ -92,9 +92,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
	__raw_write_unlock_asm(&rw->lock);
}

#define _raw_spin_relax(lock)  	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()
#define arch_spin_relax(lock)  	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif

+23 −23
Original line number Diff line number Diff line
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);

static inline int __raw_spin_is_locked(arch_spinlock_t *x)
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	return *(volatile signed char *)(&(x)->slock) <= 0;
}

static inline void __raw_spin_unlock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ volatile ("move.d %1,%0" \
			  : "=m" (lock->slock) \
@@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
			  : "memory");
}

static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

static inline int __raw_spin_trylock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return cris_spin_trylock((void *)&lock->slock);
}

static inline void __raw_spin_lock(arch_spinlock_t *lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	cris_spin_lock((void *)&lock->slock);
}

static inline void
__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	__raw_spin_lock(lock);
	arch_spin_lock(lock);
}

/*
@@ -68,64 +68,64 @@ static inline int __raw_write_can_lock(raw_rwlock_t *x)

static  inline void __raw_read_lock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->slock);
	arch_spin_lock(&rw->slock);
	while (rw->lock == 0);
	rw->lock--;
	__raw_spin_unlock(&rw->slock);
	arch_spin_unlock(&rw->slock);
}

static  inline void __raw_write_lock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->slock);
	arch_spin_lock(&rw->slock);
	while (rw->lock != RW_LOCK_BIAS);
	rw->lock = 0;
	__raw_spin_unlock(&rw->slock);
	arch_spin_unlock(&rw->slock);
}

static  inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->slock);
	arch_spin_lock(&rw->slock);
	rw->lock++;
	__raw_spin_unlock(&rw->slock);
	arch_spin_unlock(&rw->slock);
}

static  inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->slock);
	arch_spin_lock(&rw->slock);
	while (rw->lock != RW_LOCK_BIAS);
	rw->lock = RW_LOCK_BIAS;
	__raw_spin_unlock(&rw->slock);
	arch_spin_unlock(&rw->slock);
}

static  inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	int ret = 0;
	__raw_spin_lock(&rw->slock);
	arch_spin_lock(&rw->slock);
	if (rw->lock != 0) {
		rw->lock--;
		ret = 1;
	}
	__raw_spin_unlock(&rw->slock);
	arch_spin_unlock(&rw->slock);
	return ret;
}

static  inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	int ret = 0;
	__raw_spin_lock(&rw->slock);
	arch_spin_lock(&rw->slock);
	if (rw->lock == RW_LOCK_BIAS) {
		rw->lock = 0;
		ret = 1;
	}
	__raw_spin_unlock(&rw->slock);
	arch_spin_unlock(&rw->slock);
	return 1;
}

#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_ARCH_SPINLOCK_H */
+1 −1
Original line number Diff line number Diff line
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
 * @addr: Address to start counting from
 *
 * Similarly to clear_bit_unlock, the implementation uses a store
 * with release semantics. See also __raw_spin_unlock().
 * with release semantics. See also arch_spin_unlock().
 */
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
Loading