Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3cded417 authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

x86/paravirt: Optimize native pv_lock_ops.vcpu_is_preempted()



Avoid the pointless function call to pv_lock_ops.vcpu_is_preempted()
when a paravirt spinlock enabled kernel is run on native hardware.

Do this by patching out the CALL instruction with "XOR %RAX,%RAX"
which has the same effect (0 return value).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: benh@kernel.crashing.org
Cc: boqun.feng@gmail.com
Cc: borntraeger@de.ibm.com
Cc: bsingharora@gmail.com
Cc: dave@stgolabs.net
Cc: jgross@suse.com
Cc: kernellwp@gmail.com
Cc: konrad.wilk@oracle.com
Cc: mpe@ellerman.id.au
Cc: paulmck@linux.vnet.ibm.com
Cc: paulus@samba.org
Cc: pbonzini@redhat.com
Cc: rkrcmar@redhat.com
Cc: will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 05ffc951
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -678,6 +678,11 @@ static __always_inline void pv_kick(int cpu)
	PVOP_VCALL1(pv_lock_ops.kick, cpu);
}

/*
 * Returns true if the vCPU 'cpu' is currently preempted by the hypervisor.
 *
 * Dispatches through the pv_lock_ops.vcpu_is_preempted paravirt hook via
 * PVOP_CALLEE1, i.e. a single-argument paravirt call site (the callee-save
 * variant per the struct paravirt_callee_save field it now targets), which
 * makes the call site patchable at runtime — e.g. replaceable with
 * "XOR %RAX,%RAX" on native hardware so it returns 0 without a call.
 */
static __always_inline bool pv_vcpu_is_preempted(int cpu)
{
	return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
}

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
+1 −1
Original line number Diff line number Diff line
@@ -311,7 +311,7 @@ struct pv_lock_ops {
	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);

	bool (*vcpu_is_preempted)(int cpu);
	struct paravirt_callee_save vcpu_is_preempted;
};

/* This contains all the paravirt structures: we get a convenient
+6 −0
Original line number Diff line number Diff line
@@ -32,6 +32,12 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
{
	pv_queued_spin_unlock(lock);
}

/*
 * Provide the arch-specific vcpu_is_preempted() used by generic locking
 * code. The #define of the name to itself signals to the generic header
 * that an arch implementation exists, so it must not install its fallback.
 * Forwards to the paravirt hook via pv_vcpu_is_preempted().
 */
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return pv_vcpu_is_preempted(cpu);
}
#else
static inline void queued_spin_unlock(struct qspinlock *lock)
{
+0 −8
Original line number Diff line number Diff line
@@ -26,14 +26,6 @@
extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);

#ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
 * (Removed by this patch.) Old arch vcpu_is_preempted() definition: called
 * the pv_lock_ops.vcpu_is_preempted function pointer directly, incurring an
 * unconditional indirect call even on native hardware. Superseded by the
 * patchable pv_vcpu_is_preempted() wrapper in asm/qspinlock.h.
 */
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return pv_lock_ops.vcpu_is_preempted(cpu);
}
#endif

#include <asm/qspinlock.h>

/*
+13 −12
Original line number Diff line number Diff line
@@ -415,15 +415,6 @@ void kvm_disable_steal_time(void)
	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

/*
 * (Removed by this patch.) Old KVM backend for vcpu_is_preempted: reads the
 * per-CPU steal_time record for 'cpu' and normalizes its 'preempted' field
 * to a strict 0/1 bool via the double negation. Replaced by
 * __kvm_vcpu_is_preempted() plus a callee-save register thunk.
 */
static bool kvm_vcpu_is_preempted(int cpu)
{
	struct kvm_steal_time *src;

	src = &per_cpu(steal_time, cpu);

	return !!src->preempted;
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
@@ -480,9 +471,6 @@ void __init kvm_guest_init(void)
	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
#ifdef CONFIG_PARAVIRT_SPINLOCKS
		pv_lock_ops.vcpu_is_preempted = kvm_vcpu_is_preempted;
#endif
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -604,6 +592,14 @@ static void kvm_wait(u8 *ptr, u8 val)
	local_irq_restore(flags);
}

/*
 * KVM backend for the vcpu_is_preempted paravirt hook: reads the per-CPU
 * kvm_steal_time record for 'cpu' and returns its 'preempted' field
 * normalized to 0/1 via the double negation.
 *
 * __visible keeps the symbol available to the assembly thunk below.
 * PV_CALLEE_SAVE_REGS_THUNK generates a callee-save-convention wrapper so
 * the function can be installed into the struct paravirt_callee_save
 * vcpu_is_preempted slot (via PV_CALLEE_SAVE in kvm_spinlock_init()).
 */
__visible bool __kvm_vcpu_is_preempted(int cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!src->preempted;
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
@@ -620,6 +616,11 @@ void __init kvm_spinlock_init(void)
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_lock_ops.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

static __init int kvm_spinlock_init_jump(void)
Loading