Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1502f08e authored by Tony Luck
Browse files

[IA64] SMT friendly version of spin_unlock_wait()



We can be kinder to SMT systems in spin_unlock_wait.

Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 9d40ee20
Loading
Loading
Loading
Loading
+15 −2
Original line number Diff line number Diff line
@@ -75,6 +75,20 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}

/*
 * Busy-wait until the ticket lock is released, i.e. until the
 * now-serving (head) and next-ticket (tail) fields of the lock word
 * agree.  Per the commit message this polling form is kinder to the
 * sibling hardware thread on SMT parts than a plain load loop.
 *
 * NOTE(review): ld4.c.nc is an IA64 check-load variant; the exact
 * ALAT interaction (and why it helps SMT) should be confirmed against
 * the Itanium architecture manual.
 */
static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
{
	int	*p = (int *)&lock->lock, ticket;

	/* Invalidate the ALAT so the first ld4.c.nc below re-reads memory. */
	ia64_invala();

	for (;;) {
		/* Re-load the lock word each iteration; the "memory"
		 * clobber keeps the compiler from caching it. */
		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
		/* Free when head == tail: XOR of the two ticket fields,
		 * masked to TICKET_MASK, is zero. */
		if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
			return;
		cpu_relax();
	}
}

static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);
@@ -123,8 +137,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,

/*
 * Wait for the spinlock to be released without acquiring it.
 *
 * Delegate to the ticket implementation's SMT-friendly waiter.  The
 * old __raw_spin_is_locked()/cpu_relax() polling loop that preceded
 * the call here was a leftover of the previous implementation and is
 * redundant: __ticket_spin_unlock_wait() already spins until the
 * lock is free.
 */
static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	__ticket_spin_unlock_wait(lock);
}

/* A read lock may be taken while the lock word is non-negative.
 * NOTE(review): presumably a writer sets the sign bit -- inferred
 * from the >= 0 test; confirm against the IA64 rwlock code. */
#define __raw_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)