
Commit b010eb51 authored by Xiantao Zhang, committed by Avi Kivity

KVM: ia64: add directed mmio range support for kvm guests



Using VT-d, kvm guests can be assigned physical devices, so
this patch introduces a new mmio type (directed mmio)
to handle their mmio accesses.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 1cbea809
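Here "directed mmio" means the MMIO of a physical device that VT-d has assigned to the guest: its pages are mapped straight through to the hardware instead of being trapped and emulated in qemu. Such a region reaches kvm_arch_set_memory_region() (second hunk below) as an ordinary memory slot whose backing pfns are MMIO pfns. A hedged sketch of how userspace typically sets that up; the sysfs path, sizes and slot number are illustrative and not part of this patch:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

/* Map an assigned PCI device's BAR0 and register it as a KVM memory slot.
 * The pfns behind this mapping are MMIO pfns, which kvm_is_mmio_pfn()
 * detects below so the P2M entries get GPFN_PHYS_MMIO | _PAGE_MA_UC. */
static int expose_bar0(int vm_fd)
{
	struct kvm_userspace_memory_region region;
	void *bar;
	int fd;

	fd = open("/sys/bus/pci/devices/0000:01:00.0/resource0", O_RDWR);
	if (fd < 0)
		return -1;
	bar = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (bar == MAP_FAILED)
		return -1;

	region.slot            = 8;
	region.flags           = 0;
	region.guest_phys_addr = 0xc0000000;	/* guest-visible BAR address */
	region.memory_size     = 0x10000;
	region.userspace_addr  = (unsigned long)bar;
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}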
Loading
Loading
Loading
Loading
+1 −1
@@ -132,7 +132,7 @@
#define GPFN_IOSAPIC        (4UL << 60) /* IOSAPIC base */
#define GPFN_LEGACY_IO      (5UL << 60) /* Legacy I/O base */
#define GPFN_GFW        (6UL << 60) /* Guest Firmware */
-#define GPFN_HIGH_MMIO      (7UL << 60) /* High MMIO range */
+#define GPFN_PHYS_MMIO      (7UL << 60) /* Directed MMIO Range */

#define GPFN_IO_MASK        (7UL << 60) /* Guest pfn is I/O type */
#define GPFN_INV_MASK       (1UL << 63) /* Guest pfn is invalid */
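These tags occupy bits 60-62 of a P2M entry (bit 63 is the invalid flag), and the 7UL << 60 encoding formerly called GPFN_HIGH_MMIO is repurposed for directed (assigned-device) MMIO. A minimal sketch of how an entry can be classified with the macros above; the helper and enum names are invented for illustration and assume the kernel's u64 type is in scope:

enum gpfn_kind { GPFN_RAM, GPFN_EMULATED_IO, GPFN_DIRECTED_MMIO, GPFN_INVALID };

/* Classify a raw P2M entry by its GPFN type bits (illustrative only). */
static enum gpfn_kind classify_mpt_entry(u64 entry)
{
	if (entry & GPFN_INV_MASK)
		return GPFN_INVALID;
	if ((entry & GPFN_IO_MASK) == GPFN_PHYS_MMIO)
		return GPFN_DIRECTED_MMIO;	/* VT-d assigned device MMIO */
	if (entry & GPFN_IO_MASK)
		return GPFN_EMULATED_IO;	/* IOSAPIC, legacy I/O, low MMIO, ... */
	return GPFN_RAM;			/* ordinary guest memory */
}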
+2 −2
@@ -1447,11 +1447,11 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
		if (!kvm_is_mmio_pfn(pfn)) {
			kvm_set_pmt_entry(kvm, base_gfn + i,
					pfn << PAGE_SHIFT,
-					_PAGE_MA_WB);
+				_PAGE_AR_RWX | _PAGE_MA_WB);
			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
		} else {
			kvm_set_pmt_entry(kvm, base_gfn + i,
-					GPFN_LOW_MMIO | (pfn << PAGE_SHIFT),
+					GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
					_PAGE_MA_UC);
			memslot->rmap[i] = 0;
			}
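kvm_arch_set_memory_region() now sets up the two cases differently when it fills the P2M table: RAM pfns get _PAGE_AR_RWX | _PAGE_MA_WB and an rmap entry, while MMIO pfns are tagged GPFN_PHYS_MMIO, made uncacheable and left out of the rmap. The table write itself presumably reduces to something like the sketch below; this is an assumption based on kvm_get_mpt_entry() later in the patch, which reads the table as a flat u64 array at KVM_P2M_BASE, and is not the real kvm_set_pmt_entry():

/* Sketch only: store one guest-pfn entry in the flat P2M table, combining
 * the (possibly GPFN-tagged) machine address with access/attribute flags,
 * so that kvm_get_mpt_entry() can read the same word back. */
static void sketch_set_pmt_entry(u64 gfn, u64 addr_and_tag, u64 flags)
{
	u64 *base = (u64 *) KVM_P2M_BASE;

	base[gfn] = addr_and_tag | flags;
}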
+13 −13
@@ -313,21 +313,21 @@ static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
	trp->rid = rid;
}

extern u64 kvm_lookup_mpa(u64 gpfn);
extern u64 kvm_gpa_to_mpa(u64 gpa);

-/* Return I/O type if trye */
-#define __gpfn_is_io(gpfn)			\
-	({						\
-	 u64 pte, ret = 0;			\
-	 pte = kvm_lookup_mpa(gpfn);		\
-	 if (!(pte & GPFN_INV_MASK))		\
-	 ret = pte & GPFN_IO_MASK;	\
-	 ret;					\
-	 })
+extern u64 kvm_get_mpt_entry(u64 gpfn);

+/* Return I/ */
+static inline u64 __gpfn_is_io(u64 gpfn)
+{
+	u64  pte;
+	pte = kvm_get_mpt_entry(gpfn);
+	if (!(pte & GPFN_INV_MASK)) {
+		pte = pte & GPFN_IO_MASK;
+		if (pte != GPFN_PHYS_MMIO)
+			return pte;
+	}
+	return 0;
+}
#endif

#define IA64_NO_FAULT	0
#define IA64_FAULT	1
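Functionally the rewrite changes one case: the old macro returned the raw type bits for any I/O-tagged gpfn, so a directed-MMIO page would have been sent down the emulated-MMIO path, while the new inline returns 0 for GPFN_PHYS_MMIO so the fault handler maps assigned-device pages through like memory (translate_phy_pte() below gets the matching change). Roughly how a caller sees it; the two callee names are made up for illustration:

static int sketch_emulate_mmio(u64 gpa);	/* made-up: exit to the emulator */
static int sketch_map_through(u64 gpa);		/* made-up: insert a real mapping */

/* Illustrative only: branching on the new helper in a miss/fault path. */
static int sketch_handle_gpa(u64 gpa)
{
	if (__gpfn_is_io(gpa >> PAGE_SHIFT))
		return sketch_emulate_mmio(gpa);	/* IOSAPIC, legacy I/O, emulated MMIO */
	return sketch_map_through(gpa);			/* guest RAM or directed MMIO */
}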

+17 −6
@@ -390,7 +390,7 @@ void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)

u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
{
-	u64 ps, ps_mask, paddr, maddr;
+	u64 ps, ps_mask, paddr, maddr, io_mask;
	union pte_flags phy_pte;

	ps = itir_ps(itir);
@@ -398,8 +398,9 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
	phy_pte.val = *pte;
	paddr = *pte;
	paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
-	maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT);
-	if (maddr & GPFN_IO_MASK) {
+	maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
+	io_mask = maddr & GPFN_IO_MASK;
+	if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
		*pte |= VTLB_PTE_IO;
		return -1;
	}
@@ -418,7 +419,7 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
						u64 ifa, int type)
{
	u64 ps;
-	u64 phy_pte;
+	u64 phy_pte, io_mask, index;
	union ia64_rr vrr, mrr;
	int ret = 0;

@@ -426,13 +427,16 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
	vrr.val = vcpu_get_rr(v, ifa);
	mrr.val = ia64_get_rr(ifa);

+	index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
+	io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
	phy_pte = translate_phy_pte(&pte, itir, ifa);

	/* Ensure WB attribute if pte is related to a normal mem page,
	 * which is required by vga acceleration since qemu maps shared
	 * vram buffer with WB.
	 */
-	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
+	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
+			io_mask != GPFN_PHYS_MMIO) {
		pte &= ~_PAGE_MA_MASK;
		phy_pte &= ~_PAGE_MA_MASK;
	}
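The added io_mask != GPFN_PHYS_MMIO clause keeps assigned-device pages out of the force-write-back path: qemu's shared VRAM buffer wants WB, but caching stores to a real device's registers would be wrong, so directed MMIO keeps the UC attribute set up in kvm_arch_set_memory_region(). (Clearing _PAGE_MA_MASK forces WB because _PAGE_MA_WB is the all-zero encoding.) A compact restatement of the decision, as an illustrative helper that omits the NaT-attribute check:

/* Illustrative only: the memory attribute a mapping ends up with. */
static u64 sketch_final_ma(u64 pte, u64 io_mask)
{
	if (pte & VTLB_PTE_IO)
		return pte & _PAGE_MA_MASK;	/* emulated MMIO: not inserted anyway */
	if (io_mask == GPFN_PHYS_MMIO)
		return pte & _PAGE_MA_MASK;	/* assigned device: keep UC */
	return _PAGE_MA_WB;			/* normal memory: force write-back */
}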
@@ -566,12 +570,19 @@ void thash_init(struct thash_cb *hcb, u64 sz)
	}
}

-u64 kvm_lookup_mpa(u64 gpfn)
+u64 kvm_get_mpt_entry(u64 gpfn)
{
	u64 *base = (u64 *) KVM_P2M_BASE;
	return *(base + gpfn);
}

+u64 kvm_lookup_mpa(u64 gpfn)
+{
+	u64 maddr;
+	maddr = kvm_get_mpt_entry(gpfn);
+	return maddr&_PAGE_PPN_MASK;
+}

u64 kvm_gpa_to_mpa(u64 gpa)
{
	u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);
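kvm_lookup_mpa() is now a thin wrapper that strips the entry down to the machine page-frame bits, while kvm_get_mpt_entry() exposes the raw entry so translate_phy_pte() and __gpfn_is_io() can inspect the GPFN type tag. For the same gpfn (illustrative use, assuming a valid entry):

/* Sketch: what each lookup yields after this patch. */
static void sketch_lookups(u64 gpfn)
{
	u64 entry = kvm_get_mpt_entry(gpfn);	/* machine addr | GPFN tag | attr bits */
	u64 maddr = kvm_lookup_mpa(gpfn);	/* == entry & _PAGE_PPN_MASK */
	u64 tag   = entry & GPFN_IO_MASK;	/* GPFN_PHYS_MMIO for an assigned range */

	(void)maddr;
	(void)tag;
}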