
Commit dde57a21 authored by Joerg Roedel

VT-d: adapt domain map and unmap functions for IOMMU API

parent 4c5478c9
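
This patch turns the VT-d map and unmap entry points from exported, VT-d-specific functions into static callbacks with the signatures the generic IOMMU API expects: they now take a struct iommu_domain, look up the VT-d domain via domain->priv, and translate the generic IOMMU_READ/IOMMU_WRITE flags into the hardware's DMA_PTE_READ/DMA_PTE_WRITE page-table bits. As a hedged sketch (not part of this commit), a consumer such as KVM device assignment would now reach this code through the generic frontend added by the parent commit; everything except the iommu_* calls below is hypothetical:

#include <linux/iommu.h>
#include <linux/pci.h>

/* Hypothetical caller: map one page of host memory at a guest IOVA. */
static int example_map_guest_page(struct pci_dev *pdev,
				  unsigned long iova, phys_addr_t hpa)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc();	/* dispatches to the VT-d domain_init */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, &pdev->dev);
	if (ret)
		goto out_free;

	/* Generic flags; intel_iommu_map_range() turns them into DMA_PTE_*. */
	ret = iommu_map_range(domain, iova, hpa, PAGE_SIZE,
			      IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	return 0;

out_detach:
	iommu_detach_device(domain, &pdev->dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}

Routing callers through struct iommu_domain keeps them IOMMU-agnostic: the same code works unchanged once another driver (e.g. AMD IOMMU) registers its own ops behind the frontend.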
drivers/pci/intel-iommu.c  +20 −13
@@ -3047,20 +3047,28 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
 	vm_domain_remove_one_dev_info(dmar_domain, pdev);
 }
 
-int intel_iommu_map_address(struct dmar_domain *domain, dma_addr_t iova,
-			    u64 hpa, size_t size, int prot)
+static int intel_iommu_map_range(struct iommu_domain *domain,
+				 unsigned long iova, phys_addr_t hpa,
+				 size_t size, int iommu_prot)
 {
+	struct dmar_domain *dmar_domain = domain->priv;
 	u64 max_addr;
 	int addr_width;
+	int prot = 0;
 	int ret;
 
+	if (iommu_prot & IOMMU_READ)
+		prot |= DMA_PTE_READ;
+	if (iommu_prot & IOMMU_WRITE)
+		prot |= DMA_PTE_WRITE;
+
 	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
-	if (domain->max_addr < max_addr) {
+	if (dmar_domain->max_addr < max_addr) {
 		int min_agaw;
 		u64 end;
 
 		/* check if minimum agaw is sufficient for mapped address */
-		min_agaw = vm_domain_min_agaw(domain);
+		min_agaw = vm_domain_min_agaw(dmar_domain);
 		addr_width = agaw_to_width(min_agaw);
 		end = DOMAIN_MAX_ADDR(addr_width);
 		end = end & VTD_PAGE_MASK;
@@ -3070,28 +3078,27 @@ int intel_iommu_map_address(struct dmar_domain *domain, dma_addr_t iova,
 			       __func__, min_agaw, max_addr);
 			return -EFAULT;
 		}
-		domain->max_addr = max_addr;
+		dmar_domain->max_addr = max_addr;
 	}
 
-	ret = domain_page_mapping(domain, iova, hpa, size, prot);
+	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_map_address);
 
-void intel_iommu_unmap_address(struct dmar_domain *domain,
-			       dma_addr_t iova, size_t size)
+static void intel_iommu_unmap_range(struct iommu_domain *domain,
+				    unsigned long iova, size_t size)
 {
+	struct dmar_domain *dmar_domain = domain->priv;
 	dma_addr_t base;
 
 	/* The address might not be aligned */
 	base = iova & VTD_PAGE_MASK;
 	size = VTD_PAGE_ALIGN(size);
-	dma_pte_clear_range(domain, base, base + size);
+	dma_pte_clear_range(dmar_domain, base, base + size);
 
-	if (domain->max_addr == base + size)
-		domain->max_addr = base;
+	if (dmar_domain->max_addr == base + size)
+		dmar_domain->max_addr = base;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_unmap_address);
 
 int intel_iommu_found(void)
 {
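
Both functions become static, so they are reachable only through the generic layer's dispatch table. The registration lands elsewhere in this series, not in this diff; a sketch of how the callbacks are presumably hooked up (field names assumed from the 2.6.29-era struct iommu_ops):

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
};

/* ...and, during VT-d initialization: */
register_iommu(&intel_iommu_ops);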
include/linux/intel-iommu.h  +0 −4
@@ -330,10 +330,6 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 
 extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
-int intel_iommu_map_address(struct dmar_domain *domain, dma_addr_t iova,
-			    u64 hpa, size_t size, int prot);
-void intel_iommu_unmap_address(struct dmar_domain *domain,
-			       dma_addr_t iova, size_t size);
 u64 intel_iommu_iova_to_phys(struct dmar_domain *domain, u64 iova);
 
 #ifdef CONFIG_DMAR
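
With the public declarations removed from the header, nothing outside intel-iommu.c can call the VT-d mapping code directly anymore; external users go through the generic prototypes in include/linux/iommu.h instead. Assuming the frontend mirrors the callback signatures above (an assumption based on the parent commit), those look like:

extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
			   phys_addr_t paddr, size_t size, int prot);
extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
			      size_t size);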