
Commit 3ad26d81 authored by Marcelo Tosatti

KVM: use gfn_to_pfn_memslot in kvm_iommu_map_pages



So it's possible to IOMMU-map a memslot before making it visible to KVM.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 506f0d6f
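
A minimal sketch of what the interface change means for a caller, assuming a kernel context with CONFIG_IOMMU_API enabled: kvm_iommu_map_pages() now takes the memory slot itself instead of a (base_gfn, npages) pair, so a slot can be IOMMU-mapped before it is reachable through kvm->memslots. The wrapper example_iommu_map_new_slot and its argument names are hypothetical; only kvm_iommu_map_pages() and gfn_to_pfn_memslot() come from this commit.

/*
 * Hypothetical caller, for illustration only: IOMMU-map a slot that has
 * not yet been published in kvm->memslots.
 */
#include <linux/kvm_host.h>

static int example_iommu_map_new_slot(struct kvm *kvm,
				      struct kvm_memory_slot *new)
{
	/*
	 * Old interface (removed by this commit):
	 *   r = kvm_iommu_map_pages(kvm, new->base_gfn, new->npages);
	 * It resolved pfns with gfn_to_pfn(), which looks the slot up in
	 * kvm->memslots, so the slot had to be visible to KVM already.
	 */
	return kvm_iommu_map_pages(kvm, new);	/* new interface: pass the slot itself */
}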
+1 −2
@@ -440,8 +440,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 #define KVM_IOMMU_CACHE_COHERENCY	0x1
 
 #ifdef CONFIG_IOMMU_API
-int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
-			unsigned long npages);
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
 int kvm_assign_device(struct kvm *kvm,
+6 −7
@@ -32,10 +32,10 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm);
 static void kvm_iommu_put_pages(struct kvm *kvm,
 				gfn_t base_gfn, unsigned long npages);
 
-int kvm_iommu_map_pages(struct kvm *kvm,
-			gfn_t base_gfn, unsigned long npages)
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
-	gfn_t gfn = base_gfn;
+	gfn_t gfn = slot->base_gfn;
+	unsigned long npages = slot->npages;
 	pfn_t pfn;
 	int i, r = 0;
 	struct iommu_domain *domain = kvm->arch.iommu_domain;
@@ -54,7 +54,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
 			continue;
 
-		pfn = gfn_to_pfn(kvm, gfn);
+		pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
 		r = iommu_map_range(domain,
 				    gfn_to_gpa(gfn),
 				    pfn_to_hpa(pfn),
@@ -69,7 +69,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 	return 0;
 
 unmap_pages:
-	kvm_iommu_put_pages(kvm, base_gfn, i);
+	kvm_iommu_put_pages(kvm, slot->base_gfn, i);
 	return r;
 }
 
@@ -81,8 +81,7 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
 	slots = kvm->memslots;
 
 	for (i = 0; i < slots->nmemslots; i++) {
-		r = kvm_iommu_map_pages(kvm, slots->memslots[i].base_gfn,
-					slots->memslots[i].npages);
+		r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
 		if (r)
 			break;
 	}
+1 −1
@@ -684,7 +684,7 @@ skip_lpage:
 	spin_unlock(&kvm->mmu_lock);
 #ifdef CONFIG_DMAR
 	/* map the pages in iommu page table */
-	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
+	r = kvm_iommu_map_pages(kvm, memslot);
 	if (r)
 		goto out;
 #endif