Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fb3a6bbc authored by Thomas Gleixner
Browse files

locking: Convert raw_rwlock to arch_rwlock



Not strictly necessary for -rt as -rt does not have non sleeping
rwlocks, but it's odd to not have a consistent naming convention.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
parent 0199c4e6
Loading
Loading
Loading
Loading
+8 −8
Original line number Diff line number Diff line
@@ -50,17 +50,17 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)

/***********************************************************/

static inline int __raw_read_can_lock(raw_rwlock_t *lock)
static inline int __raw_read_can_lock(arch_rwlock_t *lock)
{
	return (lock->lock & 1) == 0;
}

static inline int __raw_write_can_lock(raw_rwlock_t *lock)
static inline int __raw_write_can_lock(arch_rwlock_t *lock)
{
	return lock->lock == 0;
}

static inline void __raw_read_lock(raw_rwlock_t *lock)
static inline void __raw_read_lock(arch_rwlock_t *lock)
{
	long regx;

@@ -80,7 +80,7 @@ static inline void __raw_read_lock(raw_rwlock_t *lock)
	: "m" (*lock) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *lock)
static inline void __raw_write_lock(arch_rwlock_t *lock)
{
	long regx;

@@ -100,7 +100,7 @@ static inline void __raw_write_lock(raw_rwlock_t *lock)
	: "m" (*lock) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t * lock)
static inline int __raw_read_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;
@@ -122,7 +122,7 @@ static inline int __raw_read_trylock(raw_rwlock_t * lock)
	return success;
}

static inline int __raw_write_trylock(raw_rwlock_t * lock)
static inline int __raw_write_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;
@@ -144,7 +144,7 @@ static inline int __raw_write_trylock(raw_rwlock_t * lock)
	return success;
}

static inline void __raw_read_unlock(raw_rwlock_t * lock)
static inline void __raw_read_unlock(arch_rwlock_t * lock)
{
	long regx;
	__asm__ __volatile__(
@@ -160,7 +160,7 @@ static inline void __raw_read_unlock(raw_rwlock_t * lock)
	: "m" (*lock) : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t * lock)
static inline void __raw_write_unlock(arch_rwlock_t * lock)
{
	mb();
	lock->lock = 0;
+2 −2
Original line number Diff line number Diff line
@@ -13,8 +13,8 @@ typedef struct {

typedef struct {
	volatile unsigned int lock;
} raw_rwlock_t;
} arch_rwlock_t;

#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }

#endif
+6 −6
Original line number Diff line number Diff line
@@ -86,7 +86,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 * just write zero since the lock is exclusively held.
 */

static inline void __raw_write_lock(raw_rwlock_t *rw)
static inline void __raw_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

@@ -106,7 +106,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
	smp_mb();
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

@@ -126,7 +126,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
	}
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

@@ -156,7 +156,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void __raw_read_lock(raw_rwlock_t *rw)
static inline void __raw_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

@@ -176,7 +176,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
	smp_mb();
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

@@ -198,7 +198,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
	: "cc");
}

static inline int __raw_read_trylock(raw_rwlock_t *rw)
static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

+2 −2
Original line number Diff line number Diff line
@@ -13,8 +13,8 @@ typedef struct {

typedef struct {
	volatile unsigned int lock;
} raw_rwlock_t;
} arch_rwlock_t;

#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }

#endif
+8 −8
Original line number Diff line number Diff line
@@ -52,42 +52,42 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
		cpu_relax();
}

static inline int __raw_read_can_lock(raw_rwlock_t *rw)
static inline int __raw_read_can_lock(arch_rwlock_t *rw)
{
	return __raw_uncached_fetch_asm(&rw->lock) > 0;
}

static inline int __raw_write_can_lock(raw_rwlock_t *rw)
static inline int __raw_write_can_lock(arch_rwlock_t *rw)
{
	return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
static inline void __raw_read_lock(arch_rwlock_t *rw)
{
	__raw_read_lock_asm(&rw->lock);
}

static inline int __raw_read_trylock(raw_rwlock_t *rw)
static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
	return __raw_read_trylock_asm(&rw->lock);
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
	__raw_read_unlock_asm(&rw->lock);
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
static inline void __raw_write_lock(arch_rwlock_t *rw)
{
	__raw_write_lock_asm(&rw->lock);
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
	return __raw_write_trylock_asm(&rw->lock);
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
	__raw_write_unlock_asm(&rw->lock);
}
Loading