
Commit 875764de authored by David Woodhouse

intel-iommu: Simplify __intel_alloc_iova()



There's no need for the separate iommu_alloc_iova() function, and
certainly not for it to be global. Remove the underscores while we're at
it.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
parent 6f6a00e4
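
For readability, here is a sketch of the merged allocator as it reads with this patch applied, pieced together from the first hunk below. It is not copied verbatim from the tree: the trailing return falls outside the hunk context and is assumed, and the helpers it relies on (alloc_iova(), IOVA_PFN(), DOMAIN_MAX_ADDR(), min_t(), dmar_forcedac) are the ones already used in this file.

/* Sketch of intel_alloc_iova() after the patch; callers now pass a page
 * count (nrpages) rather than a byte size. */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	/* Assumed tail: hand the allocated iova back to the caller. */
	return iova;
}

Both callers (shown in the later hunks) now compute the page count with aligned_nrpages() and pass it straight in, instead of shifting it back into a byte size.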
+18 −31
@@ -2323,43 +2323,31 @@ static inline unsigned long aligned_nrpages(unsigned long host_addr,
 	return host_addr >> VTD_PAGE_SHIFT;
 }
 
-struct iova *
-iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
-{
-	struct iova *piova;
-
-	/* Make sure it's in range */
-	end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
-	if (!size || (IOVA_START_ADDR + size > end))
-		return NULL;
-
-	piova = alloc_iova(&domain->iovad,
-			size >> PAGE_SHIFT, IOVA_PFN(end), 1);
-	return piova;
-}
-
-static struct iova *
-__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
-		   size_t size, u64 dma_mask)
+static struct iova *intel_alloc_iova(struct device *dev,
+				     struct dmar_domain *domain,
+				     unsigned long nrpages, uint64_t dma_mask)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct iova *iova = NULL;
 
-	if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
-		iova = iommu_alloc_iova(domain, size, dma_mask);
-	else {
+	/* Restrict dma_mask to the width that the iommu can handle */
+	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+
+	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
 		/*
 		 * First try to allocate an io virtual address in
 		 * DMA_BIT_MASK(32) and if that fails then try allocating
 		 * from higher range
 		 */
-		iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
-		if (!iova)
-			iova = iommu_alloc_iova(domain, size, dma_mask);
-	}
-
-	if (!iova) {
-		printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev));
+		iova = alloc_iova(&domain->iovad, nrpages,
+				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
+		if (iova)
+			return iova;
+	}
+	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
+	if (unlikely(!iova)) {
+		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
+		       nrpages, pci_name(pdev));
 		return NULL;
 	}
 
@@ -2464,7 +2452,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	iommu = domain_get_iommu(domain);
 	size = aligned_nrpages(paddr, size);
 
-	iova = __intel_alloc_iova(hwdev, domain, size << VTD_PAGE_SHIFT, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
 	if (!iova)
 		goto error;
 
@@ -2753,8 +2741,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	for_each_sg(sglist, sg, nelems, i)
 		size += aligned_nrpages(sg->offset, sg->length);
 
-	iova = __intel_alloc_iova(hwdev, domain, size << VTD_PAGE_SHIFT,
-				  pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;