Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 445c8951 authored by Thomas Gleixner's avatar Thomas Gleixner
Browse files

locking: Convert raw_spinlock to arch_spinlock



The raw_spin* namespace was taken by lockdep for the architecture
specific implementations. raw_spin_* would be the ideal name space for
the spinlocks which are not converted to sleeping locks in preempt-rt.

Linus suggested to convert the raw_ to arch_ locks and cleanup the
name space instead of using an artificial name like core_spin,
atomic_spin or whatever

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
parent 6b6b4792
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -17,13 +17,13 @@
#define __raw_spin_unlock_wait(x) \
		do { cpu_relax(); } while ((x)->lock)

static inline void __raw_spin_unlock(raw_spinlock_t * lock)
static inline void __raw_spin_unlock(arch_spinlock_t * lock)
{
	mb();
	lock->lock = 0;
}

static inline void __raw_spin_lock(raw_spinlock_t * lock)
static inline void __raw_spin_lock(arch_spinlock_t * lock)
{
	long tmp;

@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock)
	: "m"(lock->lock) : "memory");
}

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
	return !test_and_set_bit(0, &lock->lock);
}
+1 −1
Original line number Diff line number Diff line
@@ -7,7 +7,7 @@

typedef struct {
	volatile unsigned int lock;
} raw_spinlock_t;
} arch_spinlock_t;

#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }

+3 −3
Original line number Diff line number Diff line
@@ -23,7 +23,7 @@

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline void __raw_spin_lock(raw_spinlock_t *lock)
static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
	smp_mb();
}

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
	}
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

+1 −1
Original line number Diff line number Diff line
@@ -7,7 +7,7 @@

typedef struct {
	volatile unsigned int lock;
} raw_spinlock_t;
} arch_spinlock_t;

#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }

+5 −5
Original line number Diff line number Diff line
@@ -24,29 +24,29 @@ asmlinkage void __raw_write_lock_asm(volatile int *ptr);
asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
asmlinkage void __raw_write_unlock_asm(volatile int *ptr);

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
{
	return __raw_spin_is_locked_asm(&lock->lock);
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
	__raw_spin_lock_asm(&lock->lock);
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
	return __raw_spin_trylock_asm(&lock->lock);
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
	__raw_spin_unlock_asm(&lock->lock);
}

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
Loading