
Commit 816434ec authored by Linus Torvalds

Merge branch 'x86-spinlocks-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 spinlock changes from Ingo Molnar:
 "The biggest change here are paravirtualized ticket spinlocks (PV
  spinlocks), which bring a nice speedup on various benchmarks.

  The KVM host side will come to you via the KVM tree"

* 'x86-spinlocks-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/kvm/guest: Fix sparse warning: "symbol 'klock_waiting' was not declared as static"
  kvm: Paravirtual ticketlocks support for linux guests running on KVM hypervisor
  kvm guest: Add configuration support to enable debug information for KVM Guests
  kvm uapi: Add KICK_CPU and PV_UNHALT definition to uapi
  xen, pvticketlock: Allow interrupts to be enabled while blocking
  x86, ticketlock: Add slowpath logic
  jump_label: Split jumplabel ratelimit
  x86, pvticketlock: When paravirtualizing ticket locks, increment by 2
  x86, pvticketlock: Use callee-save for lock_spinning
  xen, pvticketlocks: Add xen_nopvspin parameter to disable xen pv ticketlocks
  xen, pvticketlock: Xen implementation for PV ticket locks
  xen: Defer spinlock setup until boot CPU setup
  x86, ticketlock: Collapse a layer of functions
  x86, ticketlock: Don't inline _spin_unlock when using paravirt spinlocks
  x86, spinlock: Replace pv spinlocks with pv ticketlocks
parents f357a820 36bd6213
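
Editor's note: before wading into the diffs below, it may help to have the shape of the change in mind. A paravirtualized ticket lock keeps the ordinary ticket-lock fast path, but a waiter that spins past a threshold asks the hypervisor to halt its vCPU, and the unlocker kicks the vCPU holding the next ticket awake. Tickets advance by 2 so that bit 0 of the tail can serve as the "slowpath" flag. The following is a minimal user-space model of that idea, not the kernel code; the toy_* names are made up, the blocking and kick hooks are stubbed out, and only the constants loosely mirror the diffs.

/*
 * Minimal user-space model of a paravirt-friendly ticket lock (illustrative
 * only; the real kernel code is in the arch/x86 diffs below).  Tickets
 * advance by TICKET_LOCK_INC == 2 so bit 0 of the tail can carry the
 * slowpath flag; a waiter that exceeds SPIN_THRESHOLD would ask the
 * hypervisor to block it instead of burning CPU.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <sched.h>

#define TICKET_LOCK_INC		2	/* bit 0 of the tail is reserved */
#define TICKET_SLOWPATH_FLAG	1
#define SPIN_THRESHOLD		(1 << 15)

struct toy_lock {
	_Atomic uint32_t head;		/* ticket currently being served */
	_Atomic uint32_t tail;		/* next ticket to hand out */
};

static void toy_lock_spinning(struct toy_lock *lock, uint32_t ticket)
{
	/* A PV backend would halt the vCPU here until the lock holder
	 * kicks this ticket; the toy model just yields the CPU. */
	sched_yield();
}

static void toy_spin_lock(struct toy_lock *lock)
{
	/* take a ticket: one atomic fetch-and-add on the tail */
	uint32_t me = atomic_fetch_add(&lock->tail, TICKET_LOCK_INC);

	me &= ~(uint32_t)TICKET_SLOWPATH_FLAG;
	for (;;) {
		unsigned int count = SPIN_THRESHOLD;

		do {	/* fast path: spin until our ticket comes up */
			if (atomic_load(&lock->head) == me)
				return;
		} while (--count);

		toy_lock_spinning(lock, me);	/* slow path: block/yield */
	}
}

static void toy_spin_unlock(struct toy_lock *lock)
{
	/* release: advance head; a real unlocker would also check the
	 * slowpath flag and kick the vCPU waiting on the next ticket */
	atomic_fetch_add(&lock->head, TICKET_LOCK_INC);
}

int main(void)
{
	struct toy_lock lock = { 0, 0 };

	toy_spin_lock(&lock);
	puts("lock taken");
	toy_spin_unlock(&lock);
	puts("lock released");
	return 0;
}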
+10 −0
@@ -632,6 +632,7 @@ config PARAVIRT_DEBUG
config PARAVIRT_SPINLOCKS
	bool "Paravirtualization layer for spinlocks"
	depends on PARAVIRT && SMP
	select UNINLINE_SPIN_UNLOCK
	---help---
	  Paravirtualized spinlocks allow a pvops backend to replace the
	  spinlock implementation with something virtualization-friendly
@@ -656,6 +657,15 @@ config KVM_GUEST
	  underlying device model, the host provides the guest with
	  timing infrastructure such as time of day, and system time

config KVM_DEBUG_FS
	bool "Enable debug information for KVM Guests in debugfs"
	depends on KVM_GUEST && DEBUG_FS
	default n
	---help---
	  This option enables collection of various statistics for KVM guest.
	  Statistics are displayed in debugfs filesystem. Enabling this option
	  may incur significant overhead.

source "arch/x86/lguest/Kconfig"

config PARAVIRT_TIME_ACCOUNTING
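
Editor's note: the new KVM_DEBUG_FS option (which needs both KVM_GUEST and DEBUG_FS) makes the guest publish its PV-spinlock statistics under debugfs. As a rough illustration of the mechanism only, and not the actual code in arch/x86/kernel/kvm.c, a counter can be exposed like this; the "pvlock-demo" and "kick_count" names are made up.

/* Hedged sketch: exposing a statistic via debugfs, in the style that a
 * CONFIG_KVM_DEBUG_FS build uses for its PV-spinlock counters. */
#include <linux/module.h>
#include <linux/debugfs.h>

static u32 demo_kick_count;		/* hypothetical statistic */
static struct dentry *demo_dir;

static int __init demo_debugfs_init(void)
{
	demo_dir = debugfs_create_dir("pvlock-demo", NULL);
	debugfs_create_u32("kick_count", 0444, demo_dir, &demo_kick_count);
	return 0;
}

static void __exit demo_debugfs_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_debugfs_init);
module_exit(demo_debugfs_exit);
MODULE_LICENSE("GPL");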
+12 −2
@@ -112,10 +112,20 @@ void kvm_async_pf_task_wait(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
extern void kvm_disable_steal_time(void);
#else

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init kvm_spinlock_init(void);
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static inline void kvm_spinlock_init(void)
{
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_KVM_GUEST */
#define kvm_guest_init() do {} while (0)
#define kvm_async_pf_task_wait(T) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)

static inline u32 kvm_read_and_reset_pf_reason(void)
{
	return 0;
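
Editor's note: the header change above gives the guest a kvm_spinlock_init() hook, with a no-op stub when CONFIG_PARAVIRT_SPINLOCKS is off. What that hook roughly boils down to is checking that the host advertises the new PV_UNHALT feature and then pointing the two pv_lock_ops hooks at KVM-specific callbacks. The sketch below is modeled on the guest side in arch/x86/kernel/kvm.c but is simplified, and the callback bodies are reduced to comments.

/* Simplified sketch of the guest-side wiring behind kvm_spinlock_init();
 * the real callbacks record the awaited ticket, halt the vCPU, and wake
 * it again via the KVM_HC_KICK_CPU hypercall. */
#include <linux/kernel.h>
#include <asm/paravirt.h>
#include <asm/kvm_para.h>

static void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	/* record which ticket this vCPU is waiting for, then halt until
	 * the current lock holder kicks us */
}
PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);

static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
	/* find the vCPU waiting on (lock, ticket) and wake it with the
	 * KVM_HC_KICK_CPU hypercall */
}

void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* only switch over if the host supports PV unhalt */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
	pv_lock_ops.unlock_kick = kvm_unlock_kick;
}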
+6 −26
@@ -712,36 +712,16 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int arch_spin_is_locked(struct arch_spinlock *lock)
static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
							__ticket_t ticket)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
	PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
}

static inline int arch_spin_is_contended(struct arch_spinlock *lock)
static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
							__ticket_t ticket)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
						  unsigned long flags)
{
	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}

#endif
+8 −6
@@ -327,13 +327,15 @@ struct pv_mmu_ops {
};

struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#else
typedef u16 __ticket_t;
#endif

struct pv_lock_ops {
	int (*spin_is_locked)(struct arch_spinlock *lock);
	int (*spin_is_contended)(struct arch_spinlock *lock);
	void (*spin_lock)(struct arch_spinlock *lock);
	void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
	int (*spin_trylock)(struct arch_spinlock *lock);
	void (*spin_unlock)(struct arch_spinlock *lock);
	struct paravirt_callee_save lock_spinning;
	void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
};

/* This contains all the paravirt structures: we get a convenient
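
Editor's note: after this series, struct pv_lock_ops shrinks from six function pointers to the two hooks shown above, and on bare metal both default to no-ops so the native ticket-lock fast path is unaffected. The instance below reflects my reading of the default table in arch/x86/kernel/paravirt-spinlocks.c; treat it as a sketch rather than a verbatim quote.

/* Default (native) pv_lock_ops after the series: both hooks are no-ops,
 * so bare-metal kernels keep plain ticket-lock behaviour. */
#include <linux/export.h>
#include <asm/paravirt.h>

struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
	.lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
	.unlock_kick = paravirt_nop,
#endif
};
EXPORT_SYMBOL(pv_lock_ops);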
+87 −41
#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
#include <asm/bitops.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
@@ -34,6 +37,31 @@
# define UNLOCK_LOCK_PREFIX
#endif

/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD	(1 << 15)

extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
{
	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
}

#else  /* !CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
							__ticket_t ticket)
{
}
static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
							__ticket_t ticket)
{
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.head == lock.tickets.tail;
@@ -52,81 +80,101 @@ static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = 1 };
	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };

	inc = xadd(&lock->tickets, inc);
	if (likely(inc.head == inc.tail))
		goto out;

	inc.tail &= ~TICKET_SLOWPATH_FLAG;
	for (;;) {
		if (inc.head == inc.tail)
			break;
		unsigned count = SPIN_THRESHOLD;

		do {
			if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
				goto out;
			cpu_relax();
		inc.head = ACCESS_ONCE(lock->tickets.head);
		} while (--count);
		__ticket_lock_spinning(lock, inc.tail);
	}
	barrier();		/* make sure nothing creeps before the lock is taken */
out:	barrier();	/* make sure nothing creeps before the lock is taken */
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = ACCESS_ONCE(lock->tickets);
	if (old.tickets.head != old.tickets.tail)
	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
		return 0;

	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
					    arch_spinlock_t old)
{
	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
}
	arch_spinlock_t new;

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);

	return tmp.tail != tmp.head;
	/* Perform the unlock on the "before" copy */
	old.tickets.head += TICKET_LOCK_INC;

	/* Clear the slowpath flag */
	new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);

	/*
	 * If the lock is uncontended, clear the flag - use cmpxchg in
	 * case it changes behind our back though.
	 */
	if (new.tickets.head != new.tickets.tail ||
	    cmpxchg(&lock->head_tail, old.head_tail,
					new.head_tail) != old.head_tail) {
		/*
		 * Lock still has someone queued for it, so wake up an
		 * appropriate waiter.
		 */
		__ticket_unlock_kick(lock, old.tickets.head);
	}
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
	if (TICKET_SLOWPATH_FLAG &&
	    static_key_false(&paravirt_ticketlocks_enabled)) {
		arch_spinlock_t prev;

	return (__ticket_t)(tmp.tail - tmp.head) > 1;
}
		prev = *lock;
		add_smp(&lock->tickets.head, TICKET_LOCK_INC);

#ifndef CONFIG_PARAVIRT_SPINLOCKS
		/* add_smp() is a full mb() */

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
			__ticket_unlock_slowpath(lock, prev);
	} else
		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
	return tmp.tail != tmp.head;
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						  unsigned long flags)
@@ -134,8 +182,6 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
	arch_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
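
Editor's note: one detail in the spinlock.h hunk that is easy to miss is that head and tail live in a single head_tail word, with the tail in the high half (TICKET_SHIFT bits up). That is why trylock can bump only the tail with one wide cmpxchg of old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT). Below is a small stand-alone demonstration of that packing, assuming 16-bit tickets (TICKET_SHIFT == 16) and a little-endian machine like x86; the DEMO_* names and demo_spinlock_t type are local stand-ins, not the kernel's.

/* Stand-alone illustration of the head_tail packing used by the ticket
 * lock: the head occupies the low 16 bits, the tail the high 16 bits,
 * so a single 32-bit add can advance just the tail. */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define DEMO_TICKET_SHIFT	16
#define DEMO_TICKET_LOCK_INC	2

typedef union {
	uint32_t head_tail;
	struct {
		uint16_t head;	/* low half (little-endian layout) */
		uint16_t tail;	/* high half */
	} tickets;
} demo_spinlock_t;

int main(void)
{
	demo_spinlock_t lock = { .head_tail = 0 };

	/* what trylock does: advance only the tail with one 32-bit add */
	lock.head_tail += DEMO_TICKET_LOCK_INC << DEMO_TICKET_SHIFT;

	assert(lock.tickets.head == 0);
	assert(lock.tickets.tail == DEMO_TICKET_LOCK_INC);
	printf("head=%u tail=%u\n", lock.tickets.head, lock.tickets.tail);
	return 0;
}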