Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 81aec522 authored by Xiantao Zhang's avatar Xiantao Zhang Committed by Avi Kivity
Browse files

KVM: ia64: Implement a uniform vps interface



A uniform entry point, kvm_vps_entry, is added for
vps_sync_write/read and vps_resume_handler/guest;
it branches to the appropriate PAL service according to the offset.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 271b0528
Loading
Loading
Loading
Loading
+7 −16
Original line number Diff line number Diff line
@@ -50,27 +50,18 @@

#define PAL_VSA_SYNC_READ						\
	/* begin to call pal vps sync_read */				\
	add r25 = VMM_VPD_BASE_OFFSET, r21;				\
	adds r20 = VMM_VCPU_VSA_BASE_OFFSET, r21;  /* entry point */	\
	;;								\
	ld8 r25 = [r25];      /* read vpd base */			\
	ld8 r20 = [r20];						\
	;;								\
	add r20 = PAL_VPS_SYNC_READ,r20;				\
	;;								\
{.mii;									\
	add r25 = VMM_VPD_BASE_OFFSET, r21;				\
	nop 0x0;							\
	mov r24=ip;							\
	mov b0 = r20;							\
	;;								\
};									\
{ .mmb;									\
}									\
{.mmb									\
	add r24=0x20, r24;						\
	nop 0x0;							\
	br.cond.sptk b0;        /*  call the service */			\
	ld8 r25 = [r25];      /* read vpd base */			\
	br.cond.sptk kvm_vps_sync_read;		/*call the service*/	\
	;;								\
};

};									\


#define KVM_MINSTATE_GET_CURRENT(reg)   mov reg=r21
+69 −0
Original line number Diff line number Diff line
@@ -20,6 +20,75 @@
#define ACCE_MOV_TO_PSR
#define ACCE_THASH

/*
 *	Uniform entry point for all PAL VPS services.
 *
 *	Inputs:
 *	r21 : current vcpu (VSA base is read from it at
 *	      VMM_VCPU_VSA_BASE_OFFSET)
 *	r30 : byte offset of the requested PAL VPS service
 *	r29 : scratch (clobbered)
 *
 *	Does not return here: tail-branches straight into the
 *	selected PAL service entry.
 */
ENTRY(kvm_vps_entry)
	adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21	// r29 = &(vcpu VSA base field)
	;;
	ld8 r29 = [r29]				// r29 = VSA base address
	;;
	add r29 = r29, r30			// r29 = VSA base + service offset
	;;
	mov b0 = r29
	br.sptk.many b0				// jump into the PAL service
END(kvm_vps_entry)

/*
 *	Invoke the PAL_VPS_SYNC_READ service via the common
 *	kvm_vps_entry trampoline (r30 selects the service offset).
 *
 *	Inputs:
 *	r24 : return address
 *	r25 : vpd
 *	r29 : scratch (clobbered by kvm_vps_entry)
 *
 */
GLOBAL_ENTRY(kvm_vps_sync_read)
	movl r30 = PAL_VPS_SYNC_READ		// service offset for the trampoline
	;;
	br.sptk.many kvm_vps_entry
END(kvm_vps_sync_read)

/*
 *	Invoke the PAL_VPS_SYNC_WRITE service via the common
 *	kvm_vps_entry trampoline (r30 selects the service offset).
 *
 *	Inputs:
 *	r24 : return address
 *	r25 : vpd
 *	r29 : scratch (clobbered by kvm_vps_entry)
 *
 */
GLOBAL_ENTRY(kvm_vps_sync_write)
	movl r30 = PAL_VPS_SYNC_WRITE		// service offset for the trampoline
	;;
	br.sptk.many kvm_vps_entry
END(kvm_vps_sync_write)

/*
 *	Resume guest execution through the PAL_VPS_RESUME_NORMAL
 *	service, via the common kvm_vps_entry trampoline.
 *
 *	Inputs:
 *	r23 : pr (caller's predicate registers, restored here)
 *	r24 : guest b0
 *	r25 : vpd
 *
 */
GLOBAL_ENTRY(kvm_vps_resume_normal)
	movl r30 = PAL_VPS_RESUME_NORMAL	// service offset for the trampoline
	;;
	mov pr=r23,-2				// restore predicates before entering PAL
	br.sptk.many kvm_vps_entry
END(kvm_vps_resume_normal)

/*
 *	Resume guest execution through the PAL_VPS_RESUME_HANDLER
 *	service, via the common kvm_vps_entry trampoline.  Before
 *	branching, the isr.ir bit is deposited into bit 63 of the
 *	first word of the vpd to tell PAL whether to enable CFLE.
 *
 *	Inputs:
 *	r23 : pr (caller's predicate registers, restored here)
 *	r24 : guest b0
 *	r25 : vpd
 *	r17 : isr
 */
GLOBAL_ENTRY(kvm_vps_resume_handler)
	movl r30 = PAL_VPS_RESUME_HANDLER	// service offset for the trampoline
	;;
	ld8 r27=[r25]				// r27 = first 8 bytes of vpd
	shr r17=r17,IA64_ISR_IR_BIT		// bring isr.ir down to bit 0
	;;
	dep r27=r17,r27,63,1   // bit 63 of r27 indicates whether to enable CFLE
	mov pr=r23,-2				// restore predicates before entering PAL
	br.sptk.many kvm_vps_entry
END(kvm_vps_resume_handler)

//mov r1=ar3
GLOBAL_ENTRY(kvm_asm_mov_from_ar)
#ifndef ACCE_MOV_FROM_AR
+2 −2
Original line number Diff line number Diff line
@@ -962,9 +962,9 @@ static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
void vmm_transition(struct kvm_vcpu *vcpu)
{
	ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd,
			0, 0, 0, 0, 0, 0);
			1, 0, 0, 0, 0, 0);
	vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host);
	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd,
						0, 0, 0, 0, 0, 0);
						1, 0, 0, 0, 0, 0);
	kvm_do_resume_op(vcpu);
}
+6 −33
Original line number Diff line number Diff line
@@ -1261,11 +1261,6 @@ kvm_rse_clear_invalid:
    adds r19=VMM_VPD_VPSR_OFFSET,r18
    ;;
    ld8 r19=[r19]        //vpsr
    adds r20=VMM_VCPU_VSA_BASE_OFFSET,r21
    ;;
    ld8 r20=[r20]
    ;;
//vsa_sync_write_start
    mov r25=r18
    adds r16= VMM_VCPU_GP_OFFSET,r21
    ;;
@@ -1274,10 +1269,7 @@ kvm_rse_clear_invalid:
    ;;
    add  r24=r24,r16
    ;;
    add r16=PAL_VPS_SYNC_WRITE,r20
    ;;
    mov b0=r16
    br.cond.sptk b0         // call the service
    br.sptk.many  kvm_vps_sync_write       // call the service
    ;;
END(ia64_leave_hypervisor)
// fall through
@@ -1288,28 +1280,15 @@ GLOBAL_ENTRY(ia64_vmm_entry)
 *  r17:cr.isr
 *  r18:vpd
 *  r19:vpsr
 *  r20:__vsa_base
 *  r22:b0
 *  r23:predicate
 */
    mov r24=r22
    mov r25=r18
    tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT        // p1=vpsr.ic
    (p1) br.cond.sptk.few kvm_vps_resume_normal
    (p2) br.cond.sptk.many kvm_vps_resume_handler
    ;;
    (p1) add r29=PAL_VPS_RESUME_NORMAL,r20
    (p1) br.sptk.many ia64_vmm_entry_out
    ;;
    tbit.nz p1,p2 = r17,IA64_ISR_IR_BIT		//p1=cr.isr.ir
    ;;
    (p1) add r29=PAL_VPS_RESUME_NORMAL,r20
    (p2) add r29=PAL_VPS_RESUME_HANDLER,r20
    (p2) ld8 r26=[r25]
    ;;
ia64_vmm_entry_out:
    mov pr=r23,-2
    mov b0=r29
    ;;
    br.cond.sptk b0             // call pal service
END(ia64_vmm_entry)


@@ -1376,6 +1355,9 @@ GLOBAL_ENTRY(vmm_reset_entry)
    //set up ipsr, iip, vpd.vpsr, dcr
    // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
    // For DCR: all bits 0
    bsw.0
    ;;
    mov r21 =r13
    adds r14=-VMM_PT_REGS_SIZE, r12
    ;;
    movl r6=0x501008826000      // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
@@ -1387,12 +1369,6 @@ GLOBAL_ENTRY(vmm_reset_entry)
    ;;
    srlz.i
    ;;
    bsw.0
    ;;
    mov r21 =r13
    ;;
    bsw.1
    ;;
    mov ar.rsc = 0
    ;;
    flushrs
@@ -1406,12 +1382,9 @@ GLOBAL_ENTRY(vmm_reset_entry)
    ld8 r1 = [r20]
    ;;
    mov cr.iip=r4
    ;;
    adds r16=VMM_VPD_BASE_OFFSET,r13
    adds r20=VMM_VCPU_VSA_BASE_OFFSET,r13
    ;;
    ld8 r18=[r16]
    ld8 r20=[r20]
    ;;
    adds r19=VMM_VPD_VPSR_OFFSET,r18
    ;;