
Commit 229855d6 authored by Jeremy Fitzhardinge, committed by H. Peter Anvin

x86, ticketlock: Make __ticket_spin_trylock common



Make trylock code common regardless of ticket size.

(Also, rename arch_spinlock.slock to head_tail.)

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org


Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 2994488f
arch/x86/include/asm/spinlock.h  +12 −39

@@ -69,60 +69,33 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 	barrier();		/* make sure nothing creeps before the lock is taken */
 }
 
-#if (NR_CPUS < 256)
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-	unsigned int tmp, new;
-
-	asm volatile("movzwl %2, %0\n\t"
-		     "cmpb %h0,%b0\n\t"
-		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
-		     "jne 1f\n\t"
-		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
-		     "1:"
-		     "sete %b1\n\t"
-		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
-		     :
-		     : "memory", "cc");
+	arch_spinlock_t old, new;
+
+	old.tickets = ACCESS_ONCE(lock->tickets);
+	if (old.tickets.head != old.tickets.tail)
+		return 0;
+
+	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
 
-	return tmp;
+	/* cmpxchg is a full barrier, so nothing can move before it */
+	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
+#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
-		     : "+m" (lock->slock)
+		     : "+m" (lock->head_tail)
		     :
 		     : "memory", "cc");
 }
 #else
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned tmp;
-	unsigned new;
-
-	asm volatile("movl %2,%0\n\t"
-		     "movl %0,%1\n\t"
-		     "roll $16, %0\n\t"
-		     "cmpl %0,%1\n\t"
-		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
-		     "jne 1f\n\t"
-		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
-		     "1:"
-		     "sete %b1\n\t"
-		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
-		     :
-		     : "memory", "cc");
-
-	return tmp;
-}
-
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
-		     : "+m" (lock->slock)
+		     : "+m" (lock->head_tail)
 		     :
 		     : "memory", "cc");
 }
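
As an illustration of the new common trylock above, here is a minimal user-space sketch of the same logic. It is not kernel code: GCC/Clang __atomic builtins stand in for the kernel's ACCESS_ONCE() and cmpxchg(), the NR_CPUS < 256 layout is assumed (u8 tickets packed into a u16 head_tail), and the overlay relies on little-endian byte order as on x86.

/*
 * Minimal user-space sketch of the generic __ticket_spin_trylock above.
 * Stand-ins (not from the patch): __atomic builtins replace ACCESS_ONCE()
 * and cmpxchg(); the NR_CPUS < 256 layout is assumed; little-endian only.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint8_t  __ticket_t;
typedef uint16_t __ticketpair_t;

#define TICKET_SHIFT	(sizeof(__ticket_t) * 8)

typedef struct arch_spinlock {
	union {
		__ticketpair_t head_tail;
		struct __raw_tickets {
			__ticket_t head, tail;
		} tickets;
	};
} arch_spinlock_t;

static int ticket_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	/* Snapshot head and tail in one load (the ACCESS_ONCE() in the patch). */
	old.head_tail = __atomic_load_n(&lock->head_tail, __ATOMIC_RELAXED);
	if (old.tickets.head != old.tickets.tail)
		return 0;		/* lock is held, give up immediately */

	/* Claim the next tail ticket; head is left untouched. */
	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);

	/* cmpxchg stand-in: only succeeds if nobody raced in between. */
	return __atomic_compare_exchange_n(&lock->head_tail, &old.head_tail,
					   new.head_tail, 0,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	arch_spinlock_t lock = { { 0 } };

	printf("first trylock:  %d\n", ticket_spin_trylock(&lock));	/* 1: was free */
	printf("second trylock: %d\n", ticket_spin_trylock(&lock));	/* 0: now held */
	return 0;
}

With no size-specific asm, the same C works whether __ticket_t is u8 or u16; only the typedefs and TICKET_SHIFT change, which is the point of the patch.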

arch/x86/include/asm/spinlock_types.h  +4 −2

@@ -9,8 +9,10 @@
 
 #if (CONFIG_NR_CPUS < 256)
 typedef u8  __ticket_t;
+typedef u16 __ticketpair_t;
 #else
 typedef u16 __ticket_t;
+typedef u32 __ticketpair_t;
 #endif
 
 #define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
@@ -18,14 +20,14 @@ typedef u16 __ticket_t;
 
 typedef struct arch_spinlock {
 	union {
-		unsigned int slock;
+		__ticketpair_t head_tail;
 		struct __raw_tickets {
 			__ticket_t head, tail;
 		} tickets;
 	};
 } arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ { .slock = 0 } }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
 
 #include <asm/rwlock.h>
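
The typedefs above are what make the single trylock possible: __ticketpair_t is exactly two __ticket_t values overlaid on the head/tail pair, so adding 1 << TICKET_SHIFT to head_tail advances tail and leaves head alone (on little-endian x86). Below is a small stand-alone check of that invariant, with plain C stand-ins for the kernel types; the names mirror the patch, but none of this is kernel API.

/* Checks the head_tail/tickets overlay the common trylock relies on.
 * Plain C stand-ins for the kernel types; little-endian (x86) assumed. */
#include <assert.h>
#include <stdint.h>

typedef uint8_t  __ticket_t;	/* the CONFIG_NR_CPUS < 256 case */
typedef uint16_t __ticketpair_t;

#define TICKET_SHIFT	(sizeof(__ticket_t) * 8)

typedef struct arch_spinlock {
	union {
		__ticketpair_t head_tail;
		struct __raw_tickets {
			__ticket_t head, tail;
		} tickets;
	};
} arch_spinlock_t;

/* The pair must be exactly two tickets wide for the overlay to work. */
_Static_assert(sizeof(__ticketpair_t) == 2 * sizeof(__ticket_t),
	       "__ticketpair_t must hold exactly two __ticket_t values");

int main(void)
{
	arch_spinlock_t lock = { { 0 } };	/* __ARCH_SPIN_LOCK_UNLOCKED */

	lock.head_tail += 1 << TICKET_SHIFT;	/* what trylock's cmpxchg installs */

	assert(lock.tickets.tail == 1);		/* tail advanced... */
	assert(lock.tickets.head == 0);		/* ...head untouched */
	return 0;
}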