Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 260782bc authored by Weidong Han, committed by Joerg Roedel
Browse files

KVM: use the new intel iommu APIs



intel iommu APIs are updated, use the new APIs.

In addition, change kvm_iommu_map_guest() to just create the domain, let kvm_iommu_assign_device() assign device.

Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent fe40f1e0
Loading
Loading
Loading
Loading
+10 −5
Original line number Diff line number Diff line
@@ -330,9 +330,10 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
#ifdef CONFIG_DMAR
int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
			unsigned long npages);
int kvm_iommu_map_guest(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_DMAR */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      gfn_t base_gfn,
@@ -341,9 +342,7 @@ static inline int kvm_iommu_map_pages(struct kvm *kvm,
	return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm,
				      struct kvm_assigned_dev_kernel
				      *assigned_dev)
/*
 * !CONFIG_DMAR stub (see the #else branch above): without VT-d support
 * compiled in, creating an IOMMU domain for the guest cannot succeed.
 */
static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}
@@ -352,6 +351,12 @@ static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

/*
 * !CONFIG_DMAR stub: device assignment is a harmless no-op when no IOMMU
 * support is built in, so report success (unlike kvm_iommu_map_guest,
 * which is the call that fails with -ENODEV).
 */
static inline int kvm_assign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
#endif /* CONFIG_DMAR */

static inline void kvm_guest_enter(void)
+6 −1
Original line number Diff line number Diff line
@@ -503,7 +503,12 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		r = kvm_iommu_map_guest(kvm, match);
		if (!kvm->arch.intel_iommu_domain) {
			r = kvm_iommu_map_guest(kvm);
			if (r)
				goto out_list_del;
		}
		r = kvm_assign_device(kvm, match);
		if (r)
			goto out_list_del;
	}
+55 −43
Original line number Diff line number Diff line
@@ -45,20 +45,18 @@ int kvm_iommu_map_pages(struct kvm *kvm,

	for (i = 0; i < npages; i++) {
		/* check if already mapped */
		pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
						     gfn_to_gpa(gfn));
		if (pfn)
		if (intel_iommu_iova_to_phys(domain,
					     gfn_to_gpa(gfn)))
			continue;

		pfn = gfn_to_pfn(kvm, gfn);
		r = intel_iommu_page_mapping(domain,
		r = intel_iommu_map_address(domain,
					    gfn_to_gpa(gfn),
					    pfn_to_hpa(pfn),
					    PAGE_SIZE,
					     DMA_PTE_READ |
					     DMA_PTE_WRITE);
					    DMA_PTE_READ | DMA_PTE_WRITE);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_pages:"
			printk(KERN_ERR "kvm_iommu_map_address:"
			       "iommu failed to map pfn=%lx\n", pfn);
			goto unmap_pages;
		}
@@ -86,50 +84,55 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
	return r;
}

int kvm_iommu_map_guest(struct kvm *kvm,
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev)
{
	struct pci_dev *pdev = NULL;
	struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
	int r;

	if (!intel_iommu_found()) {
		printk(KERN_ERR "%s: intel iommu not found\n", __func__);
	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	pdev = assigned_dev->dev;
	if (pdev == NULL)
		return -ENODEV;

	r = intel_iommu_attach_device(domain, pdev);
	if (r) {
		printk(KERN_ERR "assign device %x:%x.%x failed",
			pdev->bus->number,
			PCI_SLOT(pdev->devfn),
			PCI_FUNC(pdev->devfn));
		return r;
	}

	printk(KERN_DEBUG "VT-d direct map: host bdf = %x:%x:%x\n",
	printk(KERN_DEBUG "assign device: host bdf = %x:%x:%x\n",
		assigned_dev->host_busnr,
		PCI_SLOT(assigned_dev->host_devfn),
		PCI_FUNC(assigned_dev->host_devfn));

	pdev = assigned_dev->dev;

	if (pdev == NULL) {
		if (kvm->arch.intel_iommu_domain) {
			intel_iommu_domain_exit(kvm->arch.intel_iommu_domain);
			kvm->arch.intel_iommu_domain = NULL;
	return 0;
}

int kvm_iommu_map_guest(struct kvm *kvm)
{
	int r;

	if (!intel_iommu_found()) {
		printk(KERN_ERR "%s: intel iommu not found\n", __func__);
		return -ENODEV;
	}

	kvm->arch.intel_iommu_domain = intel_iommu_domain_alloc(pdev);
	kvm->arch.intel_iommu_domain = intel_iommu_alloc_domain();
	if (!kvm->arch.intel_iommu_domain)
		return -ENODEV;
		return -ENOMEM;

	r = kvm_iommu_map_memslots(kvm);
	if (r)
		goto out_unmap;

	intel_iommu_detach_dev(kvm->arch.intel_iommu_domain,
			       pdev->bus->number, pdev->devfn);

	r = intel_iommu_context_mapping(kvm->arch.intel_iommu_domain,
					pdev);
	if (r) {
		printk(KERN_ERR "Domain context map for %s failed",
		       pci_name(pdev));
		goto out_unmap;
	}
	return 0;

out_unmap:
@@ -143,14 +146,24 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
	gfn_t gfn = base_gfn;
	pfn_t pfn;
	struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
	int i;
	unsigned long i;
	u64 phys;

	/* check if iommu exists and in use */
	if (!domain)
		return;

	for (i = 0; i < npages; i++) {
		pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
		phys = intel_iommu_iova_to_phys(domain,
						gfn_to_gpa(gfn));
		pfn = phys >> PAGE_SHIFT;
		kvm_release_pfn_clean(pfn);
		gfn++;
	}

	intel_iommu_unmap_address(domain,
				  gfn_to_gpa(base_gfn),
				  PAGE_SIZE * npages);
}

static int kvm_iommu_unmap_memslots(struct kvm *kvm)
@@ -182,10 +195,9 @@ int kvm_iommu_unmap_guest(struct kvm *kvm)
		       PCI_FUNC(entry->host_devfn));

		/* detach kvm dmar domain */
		intel_iommu_detach_dev(domain, entry->host_busnr,
				       entry->host_devfn);
		intel_iommu_detach_device(domain, entry->dev);
	}
	kvm_iommu_unmap_memslots(kvm);
	intel_iommu_domain_exit(domain);
	intel_iommu_free_domain(domain);
	return 0;
}