Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3c42bf8a authored by Paul Mackerras, committed by Avi Kivity
Browse files

KVM: PPC: Split host-state fields out of kvmppc_book3s_shadow_vcpu



There are several fields in struct kvmppc_book3s_shadow_vcpu that
temporarily store bits of host state while a guest is running,
rather than anything relating to the particular guest or vcpu.
This splits them out into a new kvmppc_host_state structure and
modifies the definitions in asm-offsets.c to suit.

On 32-bit, we have a kvmppc_host_state structure inside the
kvmppc_book3s_shadow_vcpu since the assembly code needs to be able
to get to them both with one pointer.  On 64-bit they are separate
fields in the PACA.  This means that on 64-bit we don't need to
copy the kvmppc_host_state in and out on vcpu load/unload, and
in future will mean that the book3s_hv code doesn't need a
shadow_vcpu struct in the PACA at all.  That does mean that we
have to be careful not to rely on any values persisting in the
hstate field of the paca across any point where we could block
or get preempted.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 923c53ca
Loading
Loading
Loading
Loading
+5 −5
Original line number Original line Diff line number Diff line
@@ -96,16 +96,16 @@
	EXCEPTION_PROLOG_PSERIES_1(label, h);
	EXCEPTION_PROLOG_PSERIES_1(label, h);


#define __KVMTEST(n)							\
#define __KVMTEST(n)							\
	lbz	r10,PACA_KVM_SVCPU+SVCPU_IN_GUEST(r13);			\
	lbz	r10,HSTATE_IN_GUEST(r13);			\
	cmpwi	r10,0;							\
	cmpwi	r10,0;							\
	bne	do_kvm_##n
	bne	do_kvm_##n


#define __KVM_HANDLER(area, h, n)					\
#define __KVM_HANDLER(area, h, n)					\
do_kvm_##n:								\
do_kvm_##n:								\
	ld	r10,area+EX_R10(r13);					\
	ld	r10,area+EX_R10(r13);					\
	stw	r9,PACA_KVM_SVCPU+SVCPU_SCRATCH1(r13);			\
	stw	r9,HSTATE_SCRATCH1(r13);			\
	ld	r9,area+EX_R9(r13);					\
	ld	r9,area+EX_R9(r13);					\
	std	r12,PACA_KVM_SVCPU+SVCPU_SCRATCH0(r13);			\
	std	r12,HSTATE_SCRATCH0(r13);			\
	li	r12,n;							\
	li	r12,n;							\
	b	kvmppc_interrupt
	b	kvmppc_interrupt


@@ -114,9 +114,9 @@ do_kvm_##n: \
	cmpwi	r10,KVM_GUEST_MODE_SKIP;				\
	cmpwi	r10,KVM_GUEST_MODE_SKIP;				\
	ld	r10,area+EX_R10(r13);					\
	ld	r10,area+EX_R10(r13);					\
	beq	89f;							\
	beq	89f;							\
	stw	r9,PACA_KVM_SVCPU+SVCPU_SCRATCH1(r13);			\
	stw	r9,HSTATE_SCRATCH1(r13);			\
	ld	r9,area+EX_R9(r13);					\
	ld	r9,area+EX_R9(r13);					\
	std	r12,PACA_KVM_SVCPU+SVCPU_SCRATCH0(r13);			\
	std	r12,HSTATE_SCRATCH0(r13);			\
	li	r12,n;							\
	li	r12,n;							\
	b	kvmppc_interrupt;					\
	b	kvmppc_interrupt;					\
89:	mtocrf	0x80,r9;						\
89:	mtocrf	0x80,r9;						\
+19 −8
Original line number Original line Diff line number Diff line
@@ -60,6 +60,22 @@ kvmppc_resume_\intno:


#else  /*__ASSEMBLY__ */
#else  /*__ASSEMBLY__ */


/*
 * This struct goes in the PACA on 64-bit processors.  It is used
 * to store host state that needs to be saved when we enter a guest
 * and restored when we exit, but isn't specific to any particular
 * guest or vcpu.  It also has some scratch fields used by the guest
 * exit code.
 *
 * On 32-bit this struct is instead embedded in
 * kvmppc_book3s_shadow_vcpu (as its "hstate" field) so the assembly
 * code can reach both structures through one pointer.
 */
struct kvmppc_host_state {
	ulong host_r1;		/* host r1 saved across guest entry (stack pointer per PPC ABI — confirm) */
	ulong host_r2;		/* host r2 saved across guest entry (TOC pointer per PPC ABI — confirm) */
	ulong vmhandler;	/* handler address used on the guest-exit path */
	ulong scratch0;		/* exit-path scratch: holds r12 (std r12,HSTATE_SCRATCH0) */
	ulong scratch1;		/* exit-path scratch: holds r9 (stw r9,HSTATE_SCRATCH1) */
	u8 in_guest;		/* KVM_GUEST_MODE_* flag; __KVMTEST branches to do_kvm_##n when nonzero */
};

struct kvmppc_book3s_shadow_vcpu {
struct kvmppc_book3s_shadow_vcpu {
	ulong gpr[14];
	ulong gpr[14];
	u32 cr;
	u32 cr;
@@ -73,17 +89,12 @@ struct kvmppc_book3s_shadow_vcpu {
	ulong shadow_srr1;
	ulong shadow_srr1;
	ulong fault_dar;
	ulong fault_dar;


	ulong host_r1;
	ulong host_r2;
	ulong handler;
	ulong scratch0;
	ulong scratch1;
	ulong vmhandler;
	u8 in_guest;

#ifdef CONFIG_PPC_BOOK3S_32
#ifdef CONFIG_PPC_BOOK3S_32
	u32     sr[16];			/* Guest SRs */
	u32     sr[16];			/* Guest SRs */

	struct kvmppc_host_state hstate;
#endif
#endif

#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_BOOK3S_64
	u8 slb_max;			/* highest used guest slb entry */
	u8 slb_max;			/* highest used guest slb entry */
	struct  {
	struct  {
+1 −0
Original line number Original line Diff line number Diff line
@@ -149,6 +149,7 @@ struct paca_struct {
#ifdef CONFIG_KVM_BOOK3S_HANDLER
#ifdef CONFIG_KVM_BOOK3S_HANDLER
	/* We use this to store guest state in */
	/* We use this to store guest state in */
	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
	struct kvmppc_host_state kvm_hstate;
#endif
#endif
};
};


+47 −47
Original line number Original line Diff line number Diff line
@@ -198,11 +198,6 @@ int main(void)
	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
	DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
	DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
	DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
	DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
#endif
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_PPC64 */


	/* RTAS */
	/* RTAS */
@@ -416,49 +411,54 @@ int main(void)
	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
	DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
	DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
	DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -

			   offsetof(struct kvmppc_vcpu_book3s, vcpu));
#ifdef CONFIG_PPC_BOOK3S_64
	DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));
# define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
	DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer));
# define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
	DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr));
#else	/* 32-bit */
	DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr));
# define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
	DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc));
# define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
	DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0]));
#endif
	DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1]));

	DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2]));
	SVCPU_FIELD(SVCPU_CR, cr);
	DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3]));
	SVCPU_FIELD(SVCPU_XER, xer);
	DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4]));
	SVCPU_FIELD(SVCPU_CTR, ctr);
	DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5]));
	SVCPU_FIELD(SVCPU_LR, lr);
	DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6]));
	SVCPU_FIELD(SVCPU_PC, pc);
	DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7]));
	SVCPU_FIELD(SVCPU_R0, gpr[0]);
	DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8]));
	SVCPU_FIELD(SVCPU_R1, gpr[1]);
	DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9]));
	SVCPU_FIELD(SVCPU_R2, gpr[2]);
	DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10]));
	SVCPU_FIELD(SVCPU_R3, gpr[3]);
	DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11]));
	SVCPU_FIELD(SVCPU_R4, gpr[4]);
	DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12]));
	SVCPU_FIELD(SVCPU_R5, gpr[5]);
	DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13]));
	SVCPU_FIELD(SVCPU_R6, gpr[6]);
	DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1));
	SVCPU_FIELD(SVCPU_R7, gpr[7]);
	DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2));
	SVCPU_FIELD(SVCPU_R8, gpr[8]);
	DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu,
	SVCPU_FIELD(SVCPU_R9, gpr[9]);
					 vmhandler));
	SVCPU_FIELD(SVCPU_R10, gpr[10]);
	DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu,
	SVCPU_FIELD(SVCPU_R11, gpr[11]);
					scratch0));
	SVCPU_FIELD(SVCPU_R12, gpr[12]);
	DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu,
	SVCPU_FIELD(SVCPU_R13, gpr[13]);
					scratch1));
	SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr);
	DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu,
	SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar);
					in_guest));
	SVCPU_FIELD(SVCPU_LAST_INST, last_inst);
	DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu,
	SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1);
					   fault_dsisr));
	DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu,
					 fault_dar));
	DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu,
					 last_inst));
	DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu,
					   shadow_srr1));
#ifdef CONFIG_PPC_BOOK3S_32
#ifdef CONFIG_PPC_BOOK3S_32
	DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr));
	SVCPU_FIELD(SVCPU_SR, sr);
#endif
#endif
#else
#ifdef CONFIG_PPC64
	SVCPU_FIELD(SVCPU_SLB, slb);
	SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
#endif

	HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
	HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);

#else /* CONFIG_PPC_BOOK3S */
	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
@@ -468,7 +468,7 @@ int main(void)
	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
#endif /* CONFIG_PPC_BOOK3S */
#endif /* CONFIG_PPC_BOOK3S */
#endif
#endif /* CONFIG_KVM */


#ifdef CONFIG_KVM_GUEST
#ifdef CONFIG_KVM_GUEST
	DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
	DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
+1 −1
Original line number Original line Diff line number Diff line
@@ -298,7 +298,7 @@ data_access_check_stab:
	srdi	r10,r10,60
	srdi	r10,r10,60
	rlwimi	r10,r9,16,0x20
	rlwimi	r10,r9,16,0x20
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	lbz	r9,PACA_KVM_SVCPU+SVCPU_IN_GUEST(r13)
	lbz	r9,HSTATE_IN_GUEST(r13)
	rlwimi	r10,r9,8,0x300
	rlwimi	r10,r9,8,0x300
#endif
#endif
	mfcr	r9
	mfcr	r9
Loading