
Commit a4f50038 authored by Xiantao Zhang, committed by Avi Kivity

KVM: ia64: Add header files for kvm/ia64



kvm_minstate.h : macros for min-state save routines.
lapic.h : local APIC structure definition.
misc.h  : miscellaneous helper routines (p2m table, host/guest address translation).
vcpu.h  : routines related to vcpu virtualization.
vti.h   : macros and routines for VT support on Itanium.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent b024b793
arch/ia64/kvm/kvm_minstate.h

0 → 100644
+273 −0
/*
 *  kvm_minstate.h: min save macros
 *  Copyright (c) 2007, Intel Corporation.
 *
 *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 *  Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */


#include <asm/asmmacro.h>
#include <asm/types.h>
#include <asm/kregs.h>
#include "asm-offsets.h"

#define KVM_MINSTATE_START_SAVE_MIN	     					\
	mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\
	;;									\
	mov.m r28 = ar.rnat;                                  			\
	addl r22 = VMM_RBS_OFFSET,r1;            /* compute base of RBS */	\
	;;									\
	lfetch.fault.excl.nt1 [r22];						\
	addl r1 = IA64_STK_OFFSET-VMM_PT_REGS_SIZE,r1;  /* compute base of memory stack */  \
	mov r23 = ar.bspstore;			/* save ar.bspstore */          \
	;;									\
	mov ar.bspstore = r22;				/* switch to kernel RBS */\
	;;									\
	mov r18 = ar.bsp;							\
	mov ar.rsc = 0x3;     /* set eager mode, pl 0, little-endian, loadrs=0 */



#define KVM_MINSTATE_END_SAVE_MIN						\
	bsw.1;          /* switch back to bank 1 (must be last in insn group) */\
	;;


#define PAL_VSA_SYNC_READ						\
	/* begin to call pal vps sync_read */				\
	add r25 = VMM_VPD_BASE_OFFSET, r21;				\
	adds r20 = VMM_VCPU_VSA_BASE_OFFSET, r21;  /* entry point */	\
	;;								\
	ld8 r25 = [r25];      /* read vpd base */			\
	ld8 r20 = [r20];						\
	;;								\
	add r20 = PAL_VPS_SYNC_READ,r20;				\
	;;								\
{ .mii;									\
	nop 0x0;							\
	mov r24 = ip;							\
	mov b0 = r20;							\
	;;								\
};									\
{ .mmb;									\
	add r24 = 0x20, r24;						\
	nop 0x0;							\
	br.cond.sptk b0;        /*  call the service */			\
	;;								\
};



#define KVM_MINSTATE_GET_CURRENT(reg)   mov reg=r21
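
/*
 * Note (illustrative, not part of the original commit): inside the VMM,
 * r21 permanently holds the current struct kvm_vcpu pointer, so fetching
 * "current" is a plain register move here; KVM_DO_SAVE_MIN below likewise
 * establishes r13 ("current") from r21, and PAL_VSA_SYNC_READ above reads
 * the VPD base relative to r21.
 */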

/*
 * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
 * the minimum state necessary that allows us to turn psr.ic back
 * on.
 *
 * Assumed state upon entry:
 *  psr.ic: off
 *  r31:	contains saved predicates (pr)
 *
 * Upon exit, the state is as follows:
 *  psr.ic: off
 *   r2 = points to &pt_regs.r16
 *   r8 = contents of ar.ccv
 *   r9 = contents of ar.csd
 *  r10 = contents of ar.ssd
 *  r11 = FPSR_DEFAULT
 *  r12 = kernel sp (kernel virtual address)
 *  r13 = points to current task_struct (kernel virtual address)
 *  p15 = TRUE if psr.i is set in cr.ipsr
 *  predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
 *	  preserved
 *
 * Note that psr.ic is NOT turned on by this macro.  This is so that
 * we can pass interruption state as arguments to a handler.
 */


#define PT(f) (VMM_PT_REGS_##f##_OFFSET)

#define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)			\
	KVM_MINSTATE_GET_CURRENT(r16);  /* M (or M;;I) */	\
	mov r27 = ar.rsc;         /* M */			\
	mov r20 = r1;         /* A */				\
	mov r25 = ar.unat;        /* M */			\
	mov r29 = cr.ipsr;        /* M */			\
	mov r26 = ar.pfs;         /* I */			\
	mov r18 = cr.isr;         				\
	COVER;              /* B;; (or nothing) */		\
	;;							\
	tbit.z p0,p15 = r29,IA64_PSR_I_BIT;			\
	mov r1 = r16;						\
/*	mov r21=r16;	*/					\
	/* switch from user to kernel RBS: */			\
	;;							\
	invala;             /* M */				\
	SAVE_IFS;						\
	;;							\
	KVM_MINSTATE_START_SAVE_MIN				\
	adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */	\
	adds r16 = PT(CR_IPSR),r1;				\
	;;							\
	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;		\
	st8 [r16] = r29;      /* save cr.ipsr */		\
	;;							\
	lfetch.fault.excl.nt1 [r17];				\
	tbit.nz p15,p0 = r29,IA64_PSR_I_BIT;			\
	mov r29 = b0						\
	;;							\
	adds r16 = PT(R8),r1; /* initialize first base pointer */\
	adds r17 = PT(R9),r1; /* initialize second base pointer */\
	;;							\
.mem.offset 0,0; st8.spill [r16] = r8,16;			\
.mem.offset 8,0; st8.spill [r17] = r9,16;			\
	;;							\
.mem.offset 0,0; st8.spill [r16] = r10,24;			\
.mem.offset 8,0; st8.spill [r17] = r11,24;			\
	;;							\
	mov r9 = cr.iip;         /* M */			\
	mov r10 = ar.fpsr;        /* M */			\
	;;							\
	st8 [r16] = r9,16;    /* save cr.iip */			\
	st8 [r17] = r30,16;   /* save cr.ifs */			\
	sub r18 = r18,r22;    /* r18=RSE.ndirty*8 */		\
	;;							\
	st8 [r16] = r25,16;   /* save ar.unat */		\
	st8 [r17] = r26,16;    /* save ar.pfs */		\
	shl r18 = r18,16;     /* compute ar.rsc used for "loadrs" */\
	;;							\
	st8 [r16] = r27,16;   /* save ar.rsc */			\
	st8 [r17] = r28,16;   /* save ar.rnat */		\
	;;          /* avoid RAW on r16 & r17 */		\
	st8 [r16] = r23,16;   /* save ar.bspstore */		\
	st8 [r17] = r31,16;   /* save predicates */		\
	;;							\
	st8 [r16] = r29,16;   /* save b0 */			\
	st8 [r17] = r18,16;   /* save ar.rsc value for "loadrs" */\
	;;							\
.mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */  \
.mem.offset 8,0; st8.spill [r17] = r12,16;			\
	adds r12 = -16,r1;    /* switch to kernel memory stack */  \
	;;							\
.mem.offset 0,0; st8.spill [r16] = r13,16;			\
.mem.offset 8,0; st8.spill [r17] = r10,16;	/* save ar.fpsr */\
	mov r13 = r21;   /* establish `current' */		\
	;;							\
.mem.offset 0,0; st8.spill [r16] = r15,16;			\
.mem.offset 8,0; st8.spill [r17] = r14,16;			\
	;;							\
.mem.offset 0,0; st8.spill [r16] = r2,16;			\
.mem.offset 8,0; st8.spill [r17] = r3,16;			\
	adds r2 = VMM_PT_REGS_R16_OFFSET,r1;			\
	 ;;							\
	adds r16 = VMM_VCPU_IIPA_OFFSET,r13;			\
	adds r17 = VMM_VCPU_ISR_OFFSET,r13;			\
	mov r26 = cr.iipa;					\
	mov r27 = cr.isr;					\
	;;							\
	st8 [r16] = r26;					\
	st8 [r17] = r27;					\
	;;							\
	EXTRA;							\
	mov r8 = ar.ccv;					\
	mov r9 = ar.csd;					\
	mov r10 = ar.ssd;					\
	movl r11 = FPSR_DEFAULT;   /* L-unit */			\
	adds r17 = VMM_VCPU_GP_OFFSET,r13;			\
	;;							\
	ld8 r1 = [r17];/* establish kernel global pointer */	\
	;;							\
	PAL_VSA_SYNC_READ					\
	KVM_MINSTATE_END_SAVE_MIN

/*
 * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
 *
 * Assumed state upon entry:
 *  psr.ic: on
 *  r2: points to &pt_regs.r16
 *  r3: points to &pt_regs.r17
 *  r8: contents of ar.ccv
 *  r9: contents of ar.csd
 *  r10:	contents of ar.ssd
 *  r11:	FPSR_DEFAULT
 *
 * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
 */
#define KVM_SAVE_REST				\
.mem.offset 0,0; st8.spill [r2] = r16,16;	\
.mem.offset 8,0; st8.spill [r3] = r17,16;	\
	;;				\
.mem.offset 0,0; st8.spill [r2] = r18,16;	\
.mem.offset 8,0; st8.spill [r3] = r19,16;	\
	;;				\
.mem.offset 0,0; st8.spill [r2] = r20,16;	\
.mem.offset 8,0; st8.spill [r3] = r21,16;	\
	mov r18=b6;			\
	;;				\
.mem.offset 0,0; st8.spill [r2] = r22,16;	\
.mem.offset 8,0; st8.spill [r3] = r23,16;	\
	mov r19 = b7;				\
	;;					\
.mem.offset 0,0; st8.spill [r2] = r24,16;	\
.mem.offset 8,0; st8.spill [r3] = r25,16;	\
	;;					\
.mem.offset 0,0; st8.spill [r2] = r26,16;	\
.mem.offset 8,0; st8.spill [r3] = r27,16;	\
	;;					\
.mem.offset 0,0; st8.spill [r2] = r28,16;	\
.mem.offset 8,0; st8.spill [r3] = r29,16;	\
	;;					\
.mem.offset 0,0; st8.spill [r2] = r30,16;	\
.mem.offset 8,0; st8.spill [r3] = r31,32;	\
	;;					\
	mov ar.fpsr = r11;			\
	st8 [r2] = r8,8;			\
	adds r24 = PT(B6)-PT(F7),r3;		\
	adds r25 = PT(B7)-PT(F7),r3;		\
	;;					\
	st8 [r24] = r18,16;       /* b6 */	\
	st8 [r25] = r19,16;       /* b7 */	\
	adds r2 = PT(R4)-PT(F6),r2;		\
	adds r3 = PT(R5)-PT(F7),r3;		\
	;;					\
	st8 [r24] = r9;	/* ar.csd */		\
	st8 [r25] = r10;	/* ar.ssd */	\
	;;					\
	mov r18 = ar.unat;			\
	adds r19 = PT(EML_UNAT)-PT(R4),r2;	\
	;;					\
	st8 [r19] = r18; /* eml_unat */ 	\


#define KVM_SAVE_EXTRA				\
.mem.offset 0,0; st8.spill [r2] = r4,16;	\
.mem.offset 8,0; st8.spill [r3] = r5,16;	\
	;;					\
.mem.offset 0,0; st8.spill [r2] = r6,16;	\
.mem.offset 8,0; st8.spill [r3] = r7;		\
	;;					\
	mov r26 = ar.unat;			\
	;;					\
	st8 [r2] = r26;/* eml_unat */ 		\

#define KVM_SAVE_MIN_WITH_COVER		KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,)
#define KVM_SAVE_MIN_WITH_COVER_R19	KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19)
#define KVM_SAVE_MIN			KVM_DO_SAVE_MIN(     , mov r30 = r0, )
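
/*
 * Illustrative sketch (not part of the original commit): a fault handler
 * in the VMM's IVT would typically be built on these macros roughly as
 * follows (hypothetical label name):
 *
 *	ENTRY(kvm_example_fault)
 *		KVM_SAVE_MIN_WITH_COVER	// cover, switch stacks, save min state
 *		;;
 *		adds r3 = 8,r2		// second base pointer for KVM_SAVE_REST
 *		;;
 *		KVM_SAVE_REST		// psr.ic may be turned back on here
 *	END(kvm_example_fault)
 */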

arch/ia64/kvm/lapic.h

0 → 100644
+25 −0
#ifndef __KVM_IA64_LAPIC_H
#define __KVM_IA64_LAPIC_H

#include <linux/kvm_host.h>

/*
 * vlsapic
 */
struct kvm_lapic {
	struct kvm_vcpu *vcpu;
	uint64_t insvc[4];
	uint64_t vhpi;
	uint8_t xtp;
	uint8_t pal_init_pending;
	uint8_t pad[2];
};

int kvm_create_lapic(struct kvm_vcpu *vcpu);
void kvm_free_lapic(struct kvm_vcpu *vcpu);

int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
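
/*
 * Illustrative sketch (not part of the original commit): insvc[4] is a
 * 256-bit bitmap with one bit per interrupt vector, so a vector's
 * in-service bit lives at bit (vec & 63) of word (vec >> 6).
 * vlsapic_test_insvc() is a hypothetical helper shown only to make that
 * layout concrete.
 */
static inline int vlsapic_test_insvc(struct kvm_lapic *apic, u8 vec)
{
	return (apic->insvc[vec >> 6] >> (vec & 63)) & 1;
}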

#endif

arch/ia64/kvm/misc.h

0 → 100644
+93 −0
#ifndef __KVM_IA64_MISC_H
#define __KVM_IA64_MISC_H

#include <linux/kvm_host.h>
/*
 * misc.h
 * 	Copyright (C) 2007, Intel Corporation.
 *  	Xiantao Zhang  (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

/*
 * Return the p2m table base address on the host side.
 */
static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm)
{
	return (uint64_t *)(kvm->arch.vm_base + KVM_P2M_OFS);
}

static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,
		u64 paddr, u64 mem_flags)
{
	uint64_t *pmt_base = kvm_host_get_pmt(kvm);
	unsigned long pte;

	pte = PAGE_ALIGN(paddr) | mem_flags;
	pmt_base[gfn] = pte;
}
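
/*
 * Illustrative sketch (not part of the original commit): a physically
 * contiguous region would be registered in the p2m table one page at a
 * time.  kvm_set_pmt_range() is a hypothetical helper, not part of this
 * header.
 */
static inline void kvm_set_pmt_range(struct kvm *kvm, gfn_t base_gfn,
		unsigned long npages, u64 base_paddr, u64 mem_flags)
{
	unsigned long i;

	for (i = 0; i < npages; i++)
		kvm_set_pmt_entry(kvm, base_gfn + i,
				base_paddr + (i << PAGE_SHIFT), mem_flags);
}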

/* Translate a host-side address to the corresponding guest-side address. */

static inline void *to_guest(struct kvm *kvm, void *addr)
{
	return (void *)((unsigned long)(addr) - kvm->arch.vm_base +
			KVM_VM_DATA_BASE);
}

/* Translate a guest-side address to the corresponding host-side address. */

static inline void *to_host(struct kvm *kvm, void *addr)
{
	return (void *)((unsigned long)addr - KVM_VM_DATA_BASE
			+ kvm->arch.vm_base);
}
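
/*
 * Illustrative note (not part of the original commit): to_guest() and
 * to_host() are inverse linear translations between the host mapping at
 * kvm->arch.vm_base and the fixed guest-side mapping at KVM_VM_DATA_BASE,
 * so for any host-side addr:
 *
 *	to_host(kvm, to_guest(kvm, addr)) == addr
 */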

/* Get host context of the vcpu */
static inline union context *kvm_get_host_context(struct kvm_vcpu *vcpu)
{
	union context *ctx = &vcpu->arch.host;
	return to_guest(vcpu->kvm, ctx);
}

/* Get guest context of the vcpu */
static inline union context *kvm_get_guest_context(struct kvm_vcpu *vcpu)
{
	union context *ctx = &vcpu->arch.guest;
	return  to_guest(vcpu->kvm, ctx);
}

/* Get the exit data passed up from the GVMM to the kvm module. */
static inline struct exit_ctl_data *kvm_get_exit_data(struct kvm_vcpu *vcpu)
{
	return &vcpu->arch.exit_data;
}

/* Get the vcpu's MMIO request (ioreq) for the kvm module. */
static inline struct kvm_mmio_req *kvm_get_vcpu_ioreq(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p_ctl_data;

	if (vcpu) {
		p_ctl_data = kvm_get_exit_data(vcpu);
		if (p_ctl_data->exit_reason == EXIT_REASON_MMIO_INSTRUCTION)
			return &p_ctl_data->u.ioreq;
	}

	return NULL;
}

#endif

arch/ia64/kvm/vcpu.h

0 → 100644
+740 −0

File added; preview collapsed (size limit exceeded).

arch/ia64/kvm/vti.h

0 → 100644
+290 −0
/*
 * vti.h: prototypes for general VT-related interfaces
 *   	Copyright (c) 2004, Intel Corporation.
 *
 *	Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 *	Fred Yang (fred.yang@intel.com)
 * 	Kun Tian (Kevin Tian) (kevin.tian@intel.com)
 *
 *  	Copyright (c) 2007, Intel Corporation.
 *  	Zhang xiantao <xiantao.zhang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#ifndef _KVM_VT_I_H
#define _KVM_VT_I_H

#ifndef __ASSEMBLY__
#include <asm/page.h>

#include <linux/kvm_host.h>

/* define itr.i and itr.d  in ia64_itr function */
#define	ITR	0x01
#define	DTR	0x02
#define	IaDTR	0x03

#define IA64_TR_VMM       6 /*itr6, dtr6 : maps vmm code, vmbuffer*/
#define IA64_TR_VM_DATA   7 /*dtr7       : maps current vm data*/

#define RR6 (6UL<<61)
#define RR7 (7UL<<61)


/* config_options in pal_vp_init_env */
#define	VP_INITIALIZE	1UL
#define	VP_FR_PMC	(1UL << 1)
#define	VP_OPCODE	(1UL << 8)
#define	VP_CAUSE	(1UL << 9)
#define VP_FW_ACC	(1UL << 63)

/* init vp env with initializing vm_buffer */
#define	VP_INIT_ENV_INITALIZE  (VP_INITIALIZE | VP_FR_PMC |\
	VP_OPCODE | VP_CAUSE | VP_FW_ACC)
/* init vp env without initializing vm_buffer */
#define	VP_INIT_ENV  (VP_FR_PMC | VP_OPCODE | VP_CAUSE | VP_FW_ACC)

#define		PAL_VP_CREATE   265
/* Stacked Virt. Initializes a new VPD for the operation of
 * a new virtual processor in the virtual environment.
 */
#define		PAL_VP_ENV_INFO 266
/*Stacked Virt. Returns the parameters needed to enter a virtual environment.*/
#define		PAL_VP_EXIT_ENV 267
/*Stacked Virt. Allows a logical processor to exit a virtual environment.*/
#define		PAL_VP_INIT_ENV 268
/*Stacked Virt. Allows a logical processor to enter a virtual environment.*/
#define		PAL_VP_REGISTER 269
/*Stacked Virt. Register a different host IVT for the virtual processor.*/
#define		PAL_VP_RESUME   270
/* Renamed from PAL_VP_RESUME */
#define		PAL_VP_RESTORE  270
/*Stacked Virt. Resumes virtual processor operation on the logical processor.*/
#define		PAL_VP_SUSPEND  271
/* Renamed from PAL_VP_SUSPEND */
#define		PAL_VP_SAVE	271
/* Stacked Virt. Suspends operation for the specified virtual processor on
 * the logical processor.
 */
#define		PAL_VP_TERMINATE 272
/* Stacked Virt. Terminates operation for the specified virtual processor.*/

union vac {
	unsigned long value;
	struct {
		int a_int:1;
		int a_from_int_cr:1;
		int a_to_int_cr:1;
		int a_from_psr:1;
		int a_from_cpuid:1;
		int a_cover:1;
		int a_bsw:1;
		long reserved:57;
	};
};

union vdc {
	unsigned long value;
	struct {
		int d_vmsw:1;
		int d_extint:1;
		int d_ibr_dbr:1;
		int d_pmc:1;
		int d_to_pmd:1;
		int d_itm:1;
		long reserved:58;
	};
};

struct vpd {
	union vac   vac;
	union vdc   vdc;
	unsigned long  virt_env_vaddr;
	unsigned long  reserved1[29];
	unsigned long  vhpi;
	unsigned long  reserved2[95];
	unsigned long  vgr[16];
	unsigned long  vbgr[16];
	unsigned long  vnat;
	unsigned long  vbnat;
	unsigned long  vcpuid[5];
	unsigned long  reserved3[11];
	unsigned long  vpsr;
	unsigned long  vpr;
	unsigned long  reserved4[76];
	union {
		unsigned long  vcr[128];
		struct {
			unsigned long dcr;
			unsigned long itm;
			unsigned long iva;
			unsigned long rsv1[5];
			unsigned long pta;
			unsigned long rsv2[7];
			unsigned long ipsr;
			unsigned long isr;
			unsigned long rsv3;
			unsigned long iip;
			unsigned long ifa;
			unsigned long itir;
			unsigned long iipa;
			unsigned long ifs;
			unsigned long iim;
			unsigned long iha;
			unsigned long rsv4[38];
			unsigned long lid;
			unsigned long ivr;
			unsigned long tpr;
			unsigned long eoi;
			unsigned long irr[4];
			unsigned long itv;
			unsigned long pmv;
			unsigned long cmcv;
			unsigned long rsv5[5];
			unsigned long lrr0;
			unsigned long lrr1;
			unsigned long rsv6[46];
		};
	};
	unsigned long  reserved5[128];
	unsigned long  reserved6[3456];
	unsigned long  vmm_avail[128];
	unsigned long  reserved7[4096];
};

#define PAL_PROC_VM_BIT		(1UL << 40)
#define PAL_PROC_VMSW_BIT	(1UL << 54)

static inline s64 ia64_pal_vp_env_info(u64 *buffer_size,
		u64 *vp_env_info)
{
	struct ia64_pal_retval iprv;
	PAL_CALL_STK(iprv, PAL_VP_ENV_INFO, 0, 0, 0);
	*buffer_size = iprv.v0;
	*vp_env_info = iprv.v1;
	return iprv.status;
}

static inline s64 ia64_pal_vp_exit_env(u64 iva)
{
	struct ia64_pal_retval iprv;

	PAL_CALL_STK(iprv, PAL_VP_EXIT_ENV, (u64)iva, 0, 0);
	return iprv.status;
}

static inline s64 ia64_pal_vp_init_env(u64 config_options, u64 pbase_addr,
			u64 vbase_addr, u64 *vsa_base)
{
	struct ia64_pal_retval iprv;

	PAL_CALL_STK(iprv, PAL_VP_INIT_ENV, config_options, pbase_addr,
			vbase_addr);
	*vsa_base = iprv.v0;

	return iprv.status;
}

static inline s64 ia64_pal_vp_restore(u64 *vpd, u64 pal_proc_vector)
{
	struct ia64_pal_retval iprv;

	PAL_CALL_STK(iprv, PAL_VP_RESTORE, (u64)vpd, pal_proc_vector, 0);

	return iprv.status;
}

static inline s64 ia64_pal_vp_save(u64 *vpd, u64 pal_proc_vector)
{
	struct ia64_pal_retval iprv;

	PAL_CALL_STK(iprv, PAL_VP_SAVE, (u64)vpd, pal_proc_vector, 0);

	return iprv.status;
}
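
/*
 * Illustrative sketch (not part of the original commit): the expected
 * bring-up order for the PAL wrappers above.  'vm_buffer' is a
 * hypothetical caller-allocated buffer of at least the size reported by
 * PAL_VP_ENV_INFO; the first processor passes VP_INIT_ENV_INITALIZE so
 * the shared buffer is also initialized, later ones pass VP_INIT_ENV.
 */
static inline s64 kvm_vp_env_setup_sketch(u64 vm_buffer, int first_cpu,
		u64 *vsa_base)
{
	u64 buf_size, vp_env_info;
	s64 status;

	status = ia64_pal_vp_env_info(&buf_size, &vp_env_info);
	if (status != 0)
		return status;

	return ia64_pal_vp_init_env(first_cpu ?
				VP_INIT_ENV_INITALIZE : VP_INIT_ENV,
			__pa(vm_buffer), vm_buffer, vsa_base);
}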

#endif	/* !__ASSEMBLY__ */

/* VPD field offsets */
#define VPD_VAC_START_OFFSET		0
#define VPD_VDC_START_OFFSET		8
#define VPD_VHPI_START_OFFSET		256
#define VPD_VGR_START_OFFSET		1024
#define VPD_VBGR_START_OFFSET		1152
#define VPD_VNAT_START_OFFSET		1280
#define VPD_VBNAT_START_OFFSET		1288
#define VPD_VCPUID_START_OFFSET		1296
#define VPD_VPSR_START_OFFSET		1424
#define VPD_VPR_START_OFFSET		1432
#define VPD_VRSE_CFLE_START_OFFSET	1440
#define VPD_VCR_START_OFFSET		2048
#define VPD_VTPR_START_OFFSET		2576
#define VPD_VRR_START_OFFSET		3072
#define VPD_VMM_VAIL_START_OFFSET	31744
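
/*
 * Illustrative note (not part of the original commit): these byte offsets
 * mirror the layout of struct vpd above, e.g.
 * offsetof(struct vpd, vhpi) == VPD_VHPI_START_OFFSET (256) and
 * offsetof(struct vpd, vpsr) == VPD_VPSR_START_OFFSET (1424); they are
 * kept as plain numbers so assembly code can use them as well.
 */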

/* Virtualization faults */

#define EVENT_MOV_TO_AR			 1
#define EVENT_MOV_TO_AR_IMM		 2
#define EVENT_MOV_FROM_AR		 3
#define EVENT_MOV_TO_CR			 4
#define EVENT_MOV_FROM_CR		 5
#define EVENT_MOV_TO_PSR		 6
#define EVENT_MOV_FROM_PSR		 7
#define EVENT_ITC_D			 8
#define EVENT_ITC_I			 9
#define EVENT_MOV_TO_RR			 10
#define EVENT_MOV_TO_DBR		 11
#define EVENT_MOV_TO_IBR		 12
#define EVENT_MOV_TO_PKR		 13
#define EVENT_MOV_TO_PMC		 14
#define EVENT_MOV_TO_PMD		 15
#define EVENT_ITR_D			 16
#define EVENT_ITR_I			 17
#define EVENT_MOV_FROM_RR		 18
#define EVENT_MOV_FROM_DBR		 19
#define EVENT_MOV_FROM_IBR		 20
#define EVENT_MOV_FROM_PKR		 21
#define EVENT_MOV_FROM_PMC		 22
#define EVENT_MOV_FROM_CPUID		 23
#define EVENT_SSM			 24
#define EVENT_RSM			 25
#define EVENT_PTC_L			 26
#define EVENT_PTC_G			 27
#define EVENT_PTC_GA			 28
#define EVENT_PTR_D			 29
#define EVENT_PTR_I			 30
#define EVENT_THASH			 31
#define EVENT_TTAG			 32
#define EVENT_TPA			 33
#define EVENT_TAK			 34
#define EVENT_PTC_E			 35
#define EVENT_COVER			 36
#define EVENT_RFI			 37
#define EVENT_BSW_0			 38
#define EVENT_BSW_1			 39
#define EVENT_VMSW			 40

/* PAL virtual services offsets */
#define PAL_VPS_RESUME_NORMAL           0x0000
#define PAL_VPS_RESUME_HANDLER          0x0400
#define PAL_VPS_SYNC_READ               0x0800
#define PAL_VPS_SYNC_WRITE              0x0c00
#define PAL_VPS_SET_PENDING_INTERRUPT   0x1000
#define PAL_VPS_THASH                   0x1400
#define PAL_VPS_TTAG                    0x1800
#define PAL_VPS_RESTORE                 0x1c00
#define PAL_VPS_SAVE                    0x2000
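
/*
 * Illustrative note (not part of the original commit): these are byte
 * offsets into the PAL virtualization service area whose base is returned
 * by PAL_VP_INIT_ENV; a service is invoked by branching to
 * vsa_base + offset, as PAL_VSA_SYNC_READ in kvm_minstate.h does with
 * PAL_VPS_SYNC_READ.
 */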

#endif	/* _KVM_VT_I_H */