Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2994488f authored by Jeremy Fitzhardinge, committed by H. Peter Anvin
Browse files

x86, ticketlock: Convert __ticket_spin_lock to use xadd()



Convert the two variants of __ticket_spin_lock() to use xadd(), which
has the effect of making them identical, so remove the duplicate function.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org


Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent c576a3ea
Loading
Loading
Loading
Loading
+5 −30
Original line number Diff line number Diff line
@@ -54,26 +54,22 @@
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
#if (NR_CPUS < 256)
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	register union {
		struct __raw_tickets tickets;
		unsigned short slock;
	} inc = { .slock = 1 << TICKET_SHIFT };
	register struct __raw_tickets inc = { .tail = 1 };

	asm volatile (LOCK_PREFIX "xaddw %w0, %1\n"
		      : "+Q" (inc), "+m" (lock->slock) : : "memory", "cc");
	inc = xadd(&lock->tickets, inc);

	for (;;) {
		if (inc.tickets.head == inc.tickets.tail)
		if (inc.head == inc.tail)
			break;
		cpu_relax();
		inc.tickets.head = ACCESS_ONCE(lock->tickets.head);
		inc.head = ACCESS_ONCE(lock->tickets.head);
	}
	barrier();		/* make sure nothing creeps before the lock is taken */
}

#if (NR_CPUS < 256)
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp, new;
@@ -101,27 +97,6 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
		     : "memory", "cc");
}
#else
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	/*
	 * NR_CPUS >= 256 variant (removed by this patch): tickets are wide
	 * enough that head/tail live in the two halves of the 32-bit slock
	 * word, split at TICKET_SHIFT.  Take a ticket by atomically adding
	 * 1 to the tail half.
	 */
	unsigned inc = 1 << TICKET_SHIFT;
	__ticket_t tmp;

	/* xaddl leaves the pre-increment slock value behind in %0 (inc). */
	asm volatile(LOCK_PREFIX "xaddl %0, %1\n\t"
		     : "+r" (inc), "+m" (lock->slock)
		     : : "memory", "cc");

	tmp = inc;		/* low half: head value when we queued */
	inc >>= TICKET_SHIFT;	/* high half: the ticket we were handed */

	for (;;) {
		/* It is our turn once the head catches up to our ticket. */
		if ((__ticket_t)inc == tmp)
			break;
		cpu_relax();	/* PAUSE hint while spinning */
		tmp = ACCESS_ONCE(lock->tickets.head);
	}
	barrier();		/* make sure nothing creeps before the lock is taken */
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	unsigned tmp;