
Commit 021ec9c6 authored by Alexander Graf, committed by Marcelo Tosatti

KVM: PPC: Call SLB patching code in interrupt safe manner



Currently we're racy when doing the transition from IR=1 to IR=0, from
the module memory entry code to the real mode SLB switching code.

To work around that I took a look at the RTAS entry code which is faced
with a similar problem and did the same thing:

  A small helper in linear mapped memory that does mtmsr with IR=0 and
  then RFIs into the actual handler.

Thanks to that trick we can safely take page faults in the entry code
and only need to be really careful from the SLB switching part onwards.
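
To make that concrete, such a helper needs little more than an mtmsr
followed by an rfi. A minimal sketch, assuming the register usage implied
by the kvmppc_rmcall(ulong srr0, ulong srr1) prototype below (illustrative
only, not the verbatim kernel source):

  /* Runs from linear mapped memory, so instruction fetch keeps
   * working even once relocation has been turned off. */
  _GLOBAL(kvmppc_rmcall)
  	mtmsrd	r4		/* IR/DR (and EE) off now, so nothing
  				   can clobber SRR0/SRR1 under us */
  	mtsrr0	r3		/* real mode entry point of the handler */
  	mtsrr1	r4		/* MSR the handler will start with */
  	RFI			/* atomically set PC = SRR0, MSR = SRR1 */

The rfi loads the new program counter and MSR in a single instruction, so
there is no point at which we fetch code with a half-switched translation
state.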

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent bc90923e
arch/powerpc/include/asm/kvm_book3s.h +1 −0
@@ -121,6 +121,7 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 
 extern u32 kvmppc_trampoline_lowmem;
 extern u32 kvmppc_trampoline_enter;
+extern void kvmppc_rmcall(ulong srr0, ulong srr1);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
arch/powerpc/include/asm/kvm_book3s_64_asm.h +0 −1
@@ -69,7 +69,6 @@ struct kvmppc_book3s_shadow_vcpu {
 	ulong scratch0;
 	ulong scratch1;
 	ulong vmhandler;
-	ulong rmhandler;
 };
 
 #endif /*__ASSEMBLY__ */
arch/powerpc/include/asm/kvm_host.h +1 −0
@@ -167,6 +167,7 @@ struct kvm_vcpu_arch {
 	ulong trampoline_lowmem;
 	ulong trampoline_enter;
 	ulong highmem_handler;
+	ulong rmcall;
 	ulong host_paca_phys;
 	struct kvmppc_mmu mmu;
 #endif
arch/powerpc/kernel/asm-offsets.c +1 −2
@@ -214,8 +214,6 @@ int main(void)
 	DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
 	DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
 					    shadow_vcpu.vmhandler));
-	DEFINE(PACA_KVM_RMHANDLER, offsetof(struct paca_struct,
-					    shadow_vcpu.rmhandler));
 	DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
 					   shadow_vcpu.scratch0));
 	DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
@@ -438,6 +436,7 @@ int main(void)
 	DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
 	DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
 	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
+	DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
 	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
 #else
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
arch/powerpc/kvm/book3s.c +1 −0
@@ -919,6 +919,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
 	vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
 	vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+	vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
 
 	vcpu->arch.shadow_msr = MSR_USER64;
 
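A note on the last hunk: on 64-bit PowerPC with the ELFv1 ABI, a function
symbol refers to a function descriptor rather than to the code itself, so
*(ulong*)kvmppc_rmcall reads the descriptor's first word, i.e. the actual
entry address of the trampoline that the entry code can branch to.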