Commit 27d14644 authored by Xiantao Zhang, committed by Avi Kivity

KVM: ia64: vTLB change for enabling windows 2008 boot



Simplify the logic of hash vTLB, and export kvm_gpa_to_mpa.
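
The simplification collapses the old three-way page-size comparison in thash_purge_and_insert() into two independent checks: the mapping is tracked in the guest vTLB whenever the guest page size (ps) differs from the machine page size (mrr.ps) or the pte is MMIO, MMIO mappings then return early, and non-MMIO mappings go into the VHPT directly when the guest page size is at least the machine page size (smaller guest pages are handled by the existing else branch shown in the diff). Because the early return replaces the old "1 indicates this is MMIO" return value, the function now returns void. A rough, condensed sketch of the resulting flow, using only names that appear in the hunks below:

	vtlb_purge(v, ifa, ps);
	vhpt_purge(v, ifa, ps);

	if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
		vtlb_insert(v, pte, itir, ifa);		/* track in the guest vTLB */
		vcpu_quick_region_set(VMX(v, tc_regions), ifa);
	}
	if (pte & VTLB_PTE_IO)
		return;					/* MMIO never reaches the VHPT */
	if (ps >= mrr.ps)
		vhpt_insert(phy_pte, itir, ifa, pte);	/* guest page covers the machine page */
	else {
		/* guest page smaller than the machine page:
		 * handled by the existing else branch shown in the diff */
	}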

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 91b2ae77
+2 −2
@@ -703,7 +703,7 @@ extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
 extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
 extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
 extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
-extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
+extern void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
 		u64 itir, u64 ifa, int type);
 extern void thash_purge_all(struct kvm_vcpu *v);
 extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
@@ -738,7 +738,7 @@ void kvm_init_vhpt(struct kvm_vcpu *v);
 void thash_init(struct thash_cb *hcb, u64 sz);
 
 void panic_vm(struct kvm_vcpu *v, const char *fmt, ...);
-
+u64 kvm_gpa_to_mpa(u64 gpa);
 extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
 		u64 arg4, u64 arg5, u64 arg6, u64 arg7);

+17 −22
@@ -164,11 +164,11 @@ static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
 	unsigned long ps, gpaddr;
 
 	ps = itir_ps(itir);
-	rr.val = ia64_get_rr(ifa);
 
 	 gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
 					(ifa & ((1UL << ps) - 1));
 
+	rr.val = ia64_get_rr(ifa);
 	head = (struct thash_data *)ia64_thash(ifa);
 	head->etag = INVALID_TI_TAG;
 	ia64_mf();
@@ -413,15 +413,13 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 /*
  * Purge overlap TCs and then insert the new entry to emulate itc ops.
  * Notes: Only TC entry can purge and insert.
- *    1 indicates this is MMIO
  */
-int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
+void  thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 						u64 ifa, int type)
 {
 	u64 ps;
 	u64 phy_pte, io_mask, index;
 	union ia64_rr vrr, mrr;
-	int ret = 0;
 
 	ps = itir_ps(itir);
 	vrr.val = vcpu_get_rr(v, ifa);
@@ -441,25 +439,19 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 		phy_pte &= ~_PAGE_MA_MASK;
 	}
 
-	if (pte & VTLB_PTE_IO)
-		ret = 1;
-
 	vtlb_purge(v, ifa, ps);
 	vhpt_purge(v, ifa, ps);
 
-	if (ps == mrr.ps) {
-		if (!(pte&VTLB_PTE_IO)) {
-			vhpt_insert(phy_pte, itir, ifa, pte);
-		} else {
+	if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
 		vtlb_insert(v, pte, itir, ifa);
 		vcpu_quick_region_set(VMX(v, tc_regions), ifa);
 	}
-	} else if (ps > mrr.ps) {
-		vtlb_insert(v, pte, itir, ifa);
-		vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-		if (!(pte&VTLB_PTE_IO))
+	if (pte & VTLB_PTE_IO)
+		return;
+
+	if (ps >= mrr.ps)
 		vhpt_insert(phy_pte, itir, ifa, pte);
-	} else {
+	else {
 		u64 psr;
 		phy_pte  &= ~PAGE_FLAGS_RV_MASK;
 		psr = ia64_clear_ic();
@@ -469,7 +461,6 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 	if (!(pte&VTLB_PTE_IO))
 		mark_pages_dirty(v, pte, ps);
 
-	return ret;
 }
 
 /*
@@ -570,6 +561,10 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
+
+	if (gpfn >= (KVM_P2M_SIZE >> 3))
+		panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn);
+
 	return *(base + gpfn);
 }
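
The added check in kvm_get_mpt_entry() bounds the guest frame number against the size of the flat p2m table before the table is dereferenced (KVM_P2M_SIZE >> 3 is the byte size divided by sizeof(u64), i.e. the number of entries at KVM_P2M_BASE). kvm_gpa_to_mpa(), newly exported in the header, translates a guest-physical address to a machine-physical one and presumably goes through the same per-frame table. A self-contained toy model of that kind of lookup, with the page shift, table size, and all toy_* names invented for illustration (none of them are the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_SHIFT   14                       /* illustrative page size only */
#define TOY_PAGE_SIZE    ((uint64_t)1 << TOY_PAGE_SHIFT)
#define TOY_P2M_ENTRIES  1024                     /* toy table size, not KVM_P2M_SIZE */

static uint64_t toy_p2m[TOY_P2M_ENTRIES];         /* guest frame number -> machine page base */

/* Same idea as the new check above: refuse frame numbers past the end of the table. */
static uint64_t toy_get_mpt_entry(uint64_t gpfn)
{
	if (gpfn >= TOY_P2M_ENTRIES) {
		fprintf(stderr, "Invalid gpfn =%lx\n", (unsigned long)gpfn);
		exit(EXIT_FAILURE);
	}
	return toy_p2m[gpfn];
}

/* Toy gpa -> mpa: pick the machine page for the guest frame, keep the page offset. */
static uint64_t toy_gpa_to_mpa(uint64_t gpa)
{
	uint64_t entry = toy_get_mpt_entry(gpa >> TOY_PAGE_SHIFT);

	return (entry & ~(TOY_PAGE_SIZE - 1)) | (gpa & (TOY_PAGE_SIZE - 1));
}

int main(void)
{
	uint64_t gpa = ((uint64_t)2 << TOY_PAGE_SHIFT) | 0x123;  /* guest frame 2, offset 0x123 */

	toy_p2m[2] = (uint64_t)5 << TOY_PAGE_SHIFT;              /* map guest frame 2 -> machine frame 5 */
	printf("gpa 0x%llx -> mpa 0x%llx\n",
	       (unsigned long long)gpa,
	       (unsigned long long)toy_gpa_to_mpa(gpa));
	return 0;
}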