Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e5931943 authored by Thomas Gleixner
Browse files

locking: Convert raw_rwlock functions to arch_rwlock



Name space cleanup for rwlock functions. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
parent fb3a6bbc
Loading
Loading
Loading
Loading
+10 −10
Original line number Diff line number Diff line
@@ -50,17 +50,17 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)

/***********************************************************/

static inline int __raw_read_can_lock(arch_rwlock_t *lock)
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return (lock->lock & 1) == 0;
}

static inline int __raw_write_can_lock(arch_rwlock_t *lock)
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->lock == 0;
}

static inline void __raw_read_lock(arch_rwlock_t *lock)
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	long regx;

@@ -80,7 +80,7 @@ static inline void __raw_read_lock(arch_rwlock_t *lock)
	: "m" (*lock) : "memory");
}

static inline void __raw_write_lock(arch_rwlock_t *lock)
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	long regx;

@@ -100,7 +100,7 @@ static inline void __raw_write_lock(arch_rwlock_t *lock)
	: "m" (*lock) : "memory");
}

static inline int __raw_read_trylock(arch_rwlock_t * lock)
static inline int arch_read_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;
@@ -122,7 +122,7 @@ static inline int __raw_read_trylock(arch_rwlock_t * lock)
	return success;
}

static inline int __raw_write_trylock(arch_rwlock_t * lock)
static inline int arch_write_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;
@@ -144,7 +144,7 @@ static inline int __raw_write_trylock(arch_rwlock_t * lock)
	return success;
}

static inline void __raw_read_unlock(arch_rwlock_t * lock)
static inline void arch_read_unlock(arch_rwlock_t * lock)
{
	long regx;
	__asm__ __volatile__(
@@ -160,14 +160,14 @@ static inline void __raw_read_unlock(arch_rwlock_t * lock)
	: "m" (*lock) : "memory");
}

static inline void __raw_write_unlock(arch_rwlock_t * lock)
static inline void arch_write_unlock(arch_rwlock_t * lock)
{
	mb();
	lock->lock = 0;
}

#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
+10 −10
Original line number Diff line number Diff line
@@ -86,7 +86,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 * just write zero since the lock is exclusively held.
 */

static inline void __raw_write_lock(arch_rwlock_t *rw)
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

@@ -106,7 +106,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
	smp_mb();
}

static inline int __raw_write_trylock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

@@ -126,7 +126,7 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw)
	}
}

static inline void __raw_write_unlock(arch_rwlock_t *rw)
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

@@ -142,7 +142,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw)
}

/* write_can_lock - would write_trylock() succeed? */
#define __raw_write_can_lock(x)		((x)->lock == 0)
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
@@ -156,7 +156,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw)
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void __raw_read_lock(arch_rwlock_t *rw)
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

@@ -176,7 +176,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
	smp_mb();
}

static inline void __raw_read_unlock(arch_rwlock_t *rw)
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

@@ -198,7 +198,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
	: "cc");
}

static inline int __raw_read_trylock(arch_rwlock_t *rw)
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

@@ -215,10 +215,10 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
}

/* read_can_lock - would read_trylock() succeed? */
#define __raw_read_can_lock(x)		((x)->lock < 0x80000000)
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
+20 −20
Original line number Diff line number Diff line
@@ -17,12 +17,12 @@ asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
asmlinkage void __raw_read_lock_asm(volatile int *ptr);
asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
asmlinkage void __raw_write_lock_asm(volatile int *ptr);
asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
asmlinkage void arch_read_lock_asm(volatile int *ptr);
asmlinkage int arch_read_trylock_asm(volatile int *ptr);
asmlinkage void arch_read_unlock_asm(volatile int *ptr);
asmlinkage void arch_write_lock_asm(volatile int *ptr);
asmlinkage int arch_write_trylock_asm(volatile int *ptr);
asmlinkage void arch_write_unlock_asm(volatile int *ptr);

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
@@ -52,44 +52,44 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
		cpu_relax();
}

static inline int __raw_read_can_lock(arch_rwlock_t *rw)
static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
	return __raw_uncached_fetch_asm(&rw->lock) > 0;
}

static inline int __raw_write_can_lock(arch_rwlock_t *rw)
static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
	return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(arch_rwlock_t *rw)
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	__raw_read_lock_asm(&rw->lock);
	arch_read_lock_asm(&rw->lock);
}

static inline int __raw_read_trylock(arch_rwlock_t *rw)
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __raw_read_trylock_asm(&rw->lock);
	return arch_read_trylock_asm(&rw->lock);
}

static inline void __raw_read_unlock(arch_rwlock_t *rw)
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__raw_read_unlock_asm(&rw->lock);
	arch_read_unlock_asm(&rw->lock);
}

static inline void __raw_write_lock(arch_rwlock_t *rw)
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	__raw_write_lock_asm(&rw->lock);
	arch_write_lock_asm(&rw->lock);
}

static inline int __raw_write_trylock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __raw_write_trylock_asm(&rw->lock);
	return arch_write_trylock_asm(&rw->lock);
}

static inline void __raw_write_unlock(arch_rwlock_t *rw)
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__raw_write_unlock_asm(&rw->lock);
	arch_write_unlock_asm(&rw->lock);
}

#define arch_spin_relax(lock)  	cpu_relax()
+8 −8
Original line number Diff line number Diff line
@@ -56,17 +56,17 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 *
 */

static inline int __raw_read_can_lock(arch_rwlock_t *x)
static inline int arch_read_can_lock(arch_rwlock_t *x)
{
	return (int)(x)->lock > 0;
}

static inline int __raw_write_can_lock(arch_rwlock_t *x)
static inline int arch_write_can_lock(arch_rwlock_t *x)
{
	return (x)->lock == RW_LOCK_BIAS;
}

static  inline void __raw_read_lock(arch_rwlock_t *rw)
static  inline void arch_read_lock(arch_rwlock_t *rw)
{
	arch_spin_lock(&rw->slock);
	while (rw->lock == 0);
@@ -74,7 +74,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
	arch_spin_unlock(&rw->slock);
}

static  inline void __raw_write_lock(arch_rwlock_t *rw)
static  inline void arch_write_lock(arch_rwlock_t *rw)
{
	arch_spin_lock(&rw->slock);
	while (rw->lock != RW_LOCK_BIAS);
@@ -82,14 +82,14 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
	arch_spin_unlock(&rw->slock);
}

static  inline void __raw_read_unlock(arch_rwlock_t *rw)
static  inline void arch_read_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&rw->slock);
	rw->lock++;
	arch_spin_unlock(&rw->slock);
}

static  inline void __raw_write_unlock(arch_rwlock_t *rw)
static  inline void arch_write_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&rw->slock);
	while (rw->lock != RW_LOCK_BIAS);
@@ -97,7 +97,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw)
	arch_spin_unlock(&rw->slock);
}

static  inline int __raw_read_trylock(arch_rwlock_t *rw)
static  inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	arch_spin_lock(&rw->slock);
@@ -109,7 +109,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
	return ret;
}

static  inline int __raw_write_trylock(arch_rwlock_t *rw)
static  inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	arch_spin_lock(&rw->slock);
+16 −16
Original line number Diff line number Diff line
@@ -140,13 +140,13 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
	__ticket_spin_unlock_wait(lock);
}

#define __raw_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
#define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
#define arch_write_can_lock(rw)	(*(volatile int *)(rw) == 0)

#ifdef ASM_SUPPORTED

static __always_inline void
__raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1,%2\n"
@@ -169,13 +169,13 @@ __raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
		: "p6", "p7", "r2", "memory");
}

#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)

#else /* !ASM_SUPPORTED */

#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)

#define __raw_read_lock(rw)								\
#define arch_read_lock(rw)								\
do {											\
	arch_rwlock_t *__read_lock_ptr = (rw);						\
											\
@@ -188,7 +188,7 @@ do { \

#endif /* !ASM_SUPPORTED */

#define __raw_read_unlock(rw)					\
#define arch_read_unlock(rw)					\
do {								\
	arch_rwlock_t *__read_lock_ptr = (rw);			\
	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
@@ -197,7 +197,7 @@ do { \
#ifdef ASM_SUPPORTED

static __always_inline void
__raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1, %2\n"
@@ -221,9 +221,9 @@ __raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}

#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)

#define __raw_write_trylock(rw)							\
#define arch_write_trylock(rw)							\
({										\
	register long result;							\
										\
@@ -235,7 +235,7 @@ __raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
	(result == 0);								\
})

static inline void __raw_write_unlock(arch_rwlock_t *x)
static inline void arch_write_unlock(arch_rwlock_t *x)
{
	u8 *y = (u8 *)x;
	barrier();
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(arch_rwlock_t *x)

#else /* !ASM_SUPPORTED */

#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
#define arch_write_lock_flags(l, flags) arch_write_lock(l)

#define __raw_write_lock(l)								\
#define arch_write_lock(l)								\
({											\
	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x)
	} while (ia64_val);								\
})

#define __raw_write_trylock(rw)						\
#define arch_write_trylock(rw)						\
({									\
	__u64 ia64_val;							\
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);			\
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x)
	(ia64_val == 0);						\
})

static inline void __raw_write_unlock(arch_rwlock_t *x)
static inline void arch_write_unlock(arch_rwlock_t *x)
{
	barrier();
	x->write_lock = 0;
@@ -273,7 +273,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x)

#endif /* !ASM_SUPPORTED */

static inline int __raw_read_trylock(arch_rwlock_t *x)
static inline int arch_read_trylock(arch_rwlock_t *x)
{
	union {
		arch_rwlock_t lock;
Loading