Commit ecee273f, authored by Scott Wood, committed by Avi Kivity

KVM: PPC: booke: use shadow_msr



Keep the guest MSR and the guest-mode true MSR separate, rather than
modifying the guest MSR on each guest entry to produce a true MSR.

Any bits which should be modified based on guest MSR must be explicitly
propagated from vcpu->arch.shared->msr to vcpu->arch.shadow_msr in
kvmppc_set_msr().
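
A minimal sketch of that propagation (not this commit's code; KVMPPC_GUEST_MSR_BITS is a hypothetical mask standing in for whichever bits the platform lets the guest control, and the real kvmppc_set_msr() does more than this):

/* Hypothetical mask of MSR bits the guest is allowed to drive. */
#define KVMPPC_GUEST_MSR_BITS	(MSR_CE | MSR_EE | MSR_PR | MSR_DE)

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	vcpu->arch.shared->msr = new_msr;

	/* Rebuild the true MSR: keep the fixed guest-mode bits,
	 * then copy in the guest-controlled ones. */
	vcpu->arch.shadow_msr &= ~KVMPPC_GUEST_MSR_BITS;
	vcpu->arch.shadow_msr |= new_msr & KVMPPC_GUEST_MSR_BITS;
}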

While we're modifying the guest entry code, reorder a few instructions
to bury some load latencies.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent c51584d5
arch/powerpc/include/asm/kvm_host.h (+1 −1)

@@ -219,12 +219,12 @@ struct kvm_vcpu_arch {
 #endif
 
 #ifdef CONFIG_PPC_BOOK3S
-	ulong shadow_msr;
 	ulong hflags;
 	ulong guest_owned_ext;
 #endif
 	u32 vrsave; /* also USPRG0 */
 	u32 mmucr;
+	ulong shadow_msr;
 	ulong sprg4;
 	ulong sprg5;
 	ulong sprg6;
arch/powerpc/kernel/asm-offsets.c (+1 −1)

@@ -404,12 +404,12 @@ int main(void)
 	DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
 	DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
 	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
+	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
 
 	/* book3s */
 #ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
 	DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
-	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
 	DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
 	DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
 	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
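
Moving the VCPU_SHADOW_MSR entry out of the CONFIG_PPC_BOOK3S block is what makes the offset visible to booke assembly: asm-offsets.c exists to turn C struct layouts into assembler constants, each DEFINE() being emitted into the generated asm-offsets.h. Roughly, with an illustrative value rather than one from a real build:

/* include/generated/asm-offsets.h (generated; the value is illustrative) */
#define VCPU_SHADOW_MSR 840	/* offsetof(struct kvm_vcpu, arch.shadow_msr) */

That constant is what lets booke_interrupts.S, further down, reach the field with a single lwz.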
arch/powerpc/kvm/booke.c (+1 −0)

@@ -514,6 +514,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.pc = 0;
 	vcpu->arch.shared->msr = 0;
+	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
 	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
 
 	vcpu->arch.shadow_pid = 1;
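
A rough reading of that fixed baseline, assuming the usual booke MSR bit definitions (this restates the added line above with commentary, nothing more):

/* Fixed guest-mode MSR baseline (booke bit meanings assumed): */
vcpu->arch.shadow_msr = MSR_USER	/* user mode: PR set, interrupts enabled */
		      | MSR_DE		/* debug interrupts stay on for the host */
		      | MSR_IS		/* instruction address space 1 */
		      | MSR_DS;		/* data address space 1 */

Guest-dependent bits are then layered on top via kvmppc_set_msr(), as the commit message describes.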
arch/powerpc/kvm/booke_interrupts.S (+6 −11)

@@ -24,8 +24,6 @@
 #include <asm/page.h>
 #include <asm/asm-offsets.h>
 
-#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)
-
 #define VCPU_GPR(n)     (VCPU_GPRS + (n * 4))
 
 /* The host stack layout: */
@@ -405,20 +403,17 @@ lightweight_exit:
 
 	/* Finish loading guest volatiles and jump to guest. */
 	lwz	r3, VCPU_CTR(r4)
+	lwz	r5, VCPU_CR(r4)
+	lwz	r6, VCPU_PC(r4)
+	lwz	r7, VCPU_SHADOW_MSR(r4)
 	mtctr	r3
-	lwz	r3, VCPU_CR(r4)
-	mtcr	r3
+	mtcr	r5
+	mtsrr0	r6
+	mtsrr1	r7
 	lwz	r5, VCPU_GPR(r5)(r4)
 	lwz	r6, VCPU_GPR(r6)(r4)
 	lwz	r7, VCPU_GPR(r7)(r4)
 	lwz	r8, VCPU_GPR(r8)(r4)
-	lwz	r3, VCPU_PC(r4)
-	mtsrr0	r3
-	lwz	r3, VCPU_SHARED(r4)
-	lwz	r3, (VCPU_SHARED_MSR + 4)(r3)
-	oris	r3, r3, KVMPPC_MSR_MASK@h
-	ori	r3, r3, KVMPPC_MSR_MASK@l
-	mtsrr1	r3
 
 	/* Clear any debug events which occurred since we disabled MSR[DE].
 	 * XXX This gives us a 3-instruction window in which a breakpoint