
Commit cfd8983f authored by Peter Zijlstra, committed by Ingo Molnar

x86, locking/spinlocks: Remove ticket (spin)lock implementation



We've unconditionally used the queued spinlock for many releases now.

It's time to remove the old ticket lock code.
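
For context, with the ticket code gone, arch_spin_lock() on x86 maps straight onto the generic queued-spinlock fast path. Roughly (a simplified sketch of the asm-generic/qspinlock.h fast path of this era, shown for illustration only, not part of this patch):

static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	/* Fast path: swing the lock word from 0 (unlocked) to locked. */
	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;

	/* Contended: queue up on an MCS-style node in the slowpath. */
	queued_spin_lock_slowpath(lock, val);
}

Only the contended case enters the slowpath, which is where the pv_lock_ops wait/kick hooks kept below come into play.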

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <waiman.long@hpe.com>
Cc: Waiman.Long@hpe.com
Cc: david.vrabel@citrix.com
Cc: dhowells@redhat.com
Cc: pbonzini@redhat.com
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/20160518184302.GO3193@twins.programming.kicks-ass.net


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0b429e18
arch/x86/Kconfig (+1 −2)
@@ -705,7 +705,6 @@ config PARAVIRT_DEBUG
config PARAVIRT_SPINLOCKS
	bool "Paravirtualization layer for spinlocks"
	depends on PARAVIRT && SMP
-	select UNINLINE_SPIN_UNLOCK if !QUEUED_SPINLOCKS
	---help---
	  Paravirtualized spinlocks allow a pvops backend to replace the
	  spinlock implementation with something virtualization-friendly
@@ -718,7 +717,7 @@ config PARAVIRT_SPINLOCKS

config QUEUED_LOCK_STAT
	bool "Paravirt queued spinlock statistics"
-	depends on PARAVIRT_SPINLOCKS && DEBUG_FS && QUEUED_SPINLOCKS
+	depends on PARAVIRT_SPINLOCKS && DEBUG_FS
	---help---
	  Enable the collection of statistical data on the slowpath
	  behavior of paravirtualized queued spinlocks and report
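
As the help text above notes, PARAVIRT_SPINLOCKS lets a pvops backend substitute virtualization-friendly lock operations. A hypothetical sketch of how a hypervisor backend wires up the hooks that remain after this patch, modelled loosely on the KVM code (the my_hv_* names are illustrative, not kernel API):

/* Block this vCPU while *ptr == val; a real backend halts until kicked. */
static void my_hv_wait(u8 *ptr, u8 val)
{
}

/* Wake vCPU @cpu that is halted waiting on its queue node. */
static void my_hv_kick(int cpu)
{
}

static void __init my_hv_spinlock_init(void)
{
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = my_hv_wait;
	pv_lock_ops.kick = my_hv_kick;
}

The wait/kick pair matches the (u8 *ptr, u8 val) and (int cpu) signatures kept in struct pv_lock_ops further down in this commit.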
arch/x86/include/asm/paravirt.h (+0 −18)
@@ -661,8 +661,6 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

#ifdef CONFIG_QUEUED_SPINLOCKS

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							u32 val)
{
@@ -684,22 +682,6 @@ static __always_inline void pv_kick(int cpu)
	PVOP_VCALL1(pv_lock_ops.kick, cpu);
}

#else /* !CONFIG_QUEUED_SPINLOCKS */

static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
							__ticket_t ticket)
{
	PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
}

static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
							__ticket_t ticket)
{
	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}

#endif /* CONFIG_QUEUED_SPINLOCKS */

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
arch/x86/include/asm/paravirt_types.h (+0 −7)
@@ -301,23 +301,16 @@ struct pv_mmu_ops {
struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#else
typedef u16 __ticket_t;
#endif

struct qspinlock;

struct pv_lock_ops {
#ifdef CONFIG_QUEUED_SPINLOCKS
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;

	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);
#else /* !CONFIG_QUEUED_SPINLOCKS */
	struct paravirt_callee_save lock_spinning;
	void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
#endif /* !CONFIG_QUEUED_SPINLOCKS */
};

/* This contains all the paravirt structures: we get a convenient
arch/x86/include/asm/spinlock.h (+0 −174)
@@ -20,187 +20,13 @@
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD	(1 << 15)

extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);

#ifdef CONFIG_QUEUED_SPINLOCKS
#include <asm/qspinlock.h>
#else

#ifdef CONFIG_PARAVIRT_SPINLOCKS

static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
{
	set_bit(0, (volatile unsigned long *)&lock->tickets.head);
}

#else  /* !CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
							__ticket_t ticket)
{
}
static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
							__ticket_t ticket)
{
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */
static inline int  __tickets_equal(__ticket_t one, __ticket_t two)
{
	return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
}

static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
							__ticket_t head)
{
	if (head & TICKET_SLOWPATH_FLAG) {
		arch_spinlock_t old, new;

		old.tickets.head = head;
		new.tickets.head = head & ~TICKET_SLOWPATH_FLAG;
		old.tickets.tail = new.tickets.head + TICKET_LOCK_INC;
		new.tickets.tail = old.tickets.tail;

		/* try to clear slowpath flag when there are no contenders */
		cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
	}
}

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return __tickets_equal(lock.tickets.head, lock.tickets.tail);
}

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };

	inc = xadd(&lock->tickets, inc);
	if (likely(inc.head == inc.tail))
		goto out;

	for (;;) {
		unsigned count = SPIN_THRESHOLD;

		do {
			inc.head = READ_ONCE(lock->tickets.head);
			if (__tickets_equal(inc.head, inc.tail))
				goto clear_slowpath;
			cpu_relax();
		} while (--count);
		__ticket_lock_spinning(lock, inc.tail);
	}
clear_slowpath:
	__ticket_check_and_clear_slowpath(lock, inc.head);
out:
	barrier();	/* make sure nothing creeps before the lock is taken */
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = READ_ONCE(lock->tickets);
	if (!__tickets_equal(old.tickets.head, old.tickets.tail))
		return 0;

	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
	new.head_tail &= ~TICKET_SLOWPATH_FLAG;

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	if (TICKET_SLOWPATH_FLAG &&
		static_key_false(&paravirt_ticketlocks_enabled)) {
		__ticket_t head;

		BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);

		head = xadd(&lock->tickets.head, TICKET_LOCK_INC);

		if (unlikely(head & TICKET_SLOWPATH_FLAG)) {
			head &= ~TICKET_SLOWPATH_FLAG;
			__ticket_unlock_kick(lock, (head + TICKET_LOCK_INC));
		}
	} else
		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = READ_ONCE(lock->tickets);

	return !__tickets_equal(tmp.tail, tmp.head);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = READ_ONCE(lock->tickets);

	tmp.head &= ~TICKET_SLOWPATH_FLAG;
	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						  unsigned long flags)
{
	arch_spin_lock(lock);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	__ticket_t head = READ_ONCE(lock->tickets.head);

	for (;;) {
		struct __raw_tickets tmp = READ_ONCE(lock->tickets);
		/*
		 * We need to check "unlocked" in a loop, tmp.head == head
		 * can be false positive because of overflow.
		 */
		if (__tickets_equal(tmp.head, tmp.tail) ||
				!__tickets_equal(tmp.head, head))
			break;

		cpu_relax();
	}
}
#endif /* CONFIG_QUEUED_SPINLOCKS */

/*
 * Read-write spinlocks, allowing multiple readers
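
The comment block in the removed code above sums up the ticket scheme: the lock word is a head/tail ticket pair, acquisition takes a ticket with a single xadd and spins until the head catches up, and unlock advances the head. A minimal user-space illustration of the same idea with C11 atomics (layout and names are illustrative, not the kernel's, and the paravirt TICKET_SLOWPATH_FLAG handling is left out):

#include <stdatomic.h>

struct ticket_lock {
	atomic_ushort head;	/* ticket currently being served */
	atomic_ushort tail;	/* next ticket to hand out */
};

static void ticket_lock(struct ticket_lock *l)
{
	/* Take a ticket: the analogue of the kernel's xadd on tickets.tail. */
	unsigned short me = atomic_fetch_add_explicit(&l->tail, 1,
						      memory_order_relaxed);

	/* Spin until the head reaches our ticket; this gives FIFO order. */
	while (atomic_load_explicit(&l->head, memory_order_acquire) != me)
		;	/* the kernel would cpu_relax() here and, under PV, block */
}

static void ticket_unlock(struct ticket_lock *l)
{
	/* Serve the next waiter; release ordering publishes the critical section. */
	atomic_fetch_add_explicit(&l->head, 1, memory_order_release);
}

Unlike this sketch, the kernel's xadd covered both halves of the lock word at once, so the acquirer learned the current head in the same atomic operation that took its ticket.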
arch/x86/include/asm/spinlock_types.h (+0 −13)
@@ -23,20 +23,7 @@ typedef u32 __ticketpair_t;

#define TICKET_SHIFT	(sizeof(__ticket_t) * 8)

#ifdef CONFIG_QUEUED_SPINLOCKS
#include <asm-generic/qspinlock_types.h>
#else
typedef struct arch_spinlock {
	union {
		__ticketpair_t head_tail;
		struct __raw_tickets {
			__ticket_t head, tail;
		} tickets;
	};
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
#endif /* CONFIG_QUEUED_SPINLOCKS */

#include <asm-generic/qrwlock_types.h>
