Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 84eb950d authored by Jeremy Fitzhardinge, committed by H. Peter Anvin
Browse files

x86, ticketlock: Clean up types and accessors



A few cleanups to the way spinlocks are defined and accessed:
 - define __ticket_t which is the size of a spinlock ticket (ie, enough
   bits to hold all the cpus)
 - Define struct arch_spinlock as a union containing plain slock and
   the head and tail tickets
 - Use head and tail to implement some of the spinlock predicates.
 - Make all ticket variables unsigned.
 - Use TICKET_SHIFT to form constants

Most of this will be used in later patches.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org


Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 8b8bc2f7
Loading
Loading
Loading
Loading
+10 −14
Original line number Diff line number Diff line
@@ -55,11 +55,9 @@
 * much between them in performance though, especially as locks are out of line.
 */
#if (NR_CPUS < 256)
#define TICKET_SHIFT 8

static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	short inc = 0x0100;
	unsigned short inc = 1 << TICKET_SHIFT;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"
@@ -78,7 +76,7 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, new;
	unsigned int tmp, new;

	asm volatile("movzwl %2, %0\n\t"
		     "cmpb %h0,%b0\n\t"
@@ -103,12 +101,10 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
		     : "memory", "cc");
}
#else
#define TICKET_SHIFT 16

static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;
	unsigned inc = 1 << TICKET_SHIFT;
	unsigned tmp;

	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
@@ -128,8 +124,8 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp;
	int new;
	unsigned tmp;
	unsigned new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
@@ -159,16 +155,16 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)

/*
 * NOTE(review): this is a rendered diff — each pair below shows the
 * pre-patch line first and its post-patch replacement second.
 */
static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	/* removed: signed snapshot of the raw lock word */
	int tmp = ACCESS_ONCE(lock->slock);
	/* added: snapshot head/tail tickets through the new union view */
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	/* removed: extract and compare the two tickets by shift/mask */
	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
	/* added: the lock is held whenever head and tail differ */
	return !!(tmp.tail ^ tmp.head);
}

/*
 * NOTE(review): rendered diff — pre-patch line first, post-patch
 * replacement second in each pair.
 */
static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	/* removed: signed snapshot of the raw lock word */
	int tmp = ACCESS_ONCE(lock->slock);
	/* added: snapshot head/tail tickets through the new union view */
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	/* removed: derive the outstanding-ticket count by shift/mask */
	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
	/* added: contended when more than one ticket is outstanding */
	return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
}

#ifndef CONFIG_PARAVIRT_SPINLOCKS
+18 −2
Original line number Diff line number Diff line
@@ -5,11 +5,27 @@
# error "please don't include this file directly"
#endif

#include <linux/types.h>

/*
 * __ticket_t is the size of one spinlock ticket: per the commit message,
 * just enough bits to hold a ticket for every possible CPU — one byte
 * when there are fewer than 256 CPUs, two bytes otherwise.
 */
#if (CONFIG_NR_CPUS < 256)
typedef u8  __ticket_t;
#else
typedef u16 __ticket_t;
#endif

/* Width of one ticket in bits; used to form shift/mask constants. */
#define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
/* All-ones mask the width of a single ticket. */
#define TICKET_MASK	((__ticket_t)((1 << TICKET_SHIFT) - 1))

/*
 * Per the commit message: arch_spinlock is a union overlaying the plain
 * lock word (slock) with the head and tail tickets, so the predicates
 * can read the tickets directly instead of shifting/masking slock.
 */
typedef struct arch_spinlock {
	union {
		unsigned int slock;		/* whole lock word, accessed as one unit */
		struct __raw_tickets {
			__ticket_t head, tail;	/* currently-served / next-to-issue tickets */
		} tickets;
	};
} arch_spinlock_t;

/* removed: bare aggregate initializer for the old plain-int lock */
#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
/* added: designated initializer targeting the union's slock member */
#define __ARCH_SPIN_LOCK_UNLOCKED	{ { .slock = 0 } }

#include <asm/rwlock.h>