
Commit 21b95aaf authored by Christoph Hellwig, committed by Joerg Roedel

iommu/dma: Refactor the page array remapping allocator



Move the call to dma_common_pages_remap into __iommu_dma_alloc and
rename it to iommu_dma_alloc_remap.  This creates a self-contained
helper for remapped pages allocation and mapping.
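
In effect, the non-atomic branch of iommu_dma_alloc() collapses from a two-step allocate-then-remap sequence into a single call. A condensed before/after view of that caller, pulled from the hunks below (error handling abbreviated):

	/* before: the caller juggles the page array and remaps it itself */
	pages = __iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot, handle);
	if (!pages)
		return NULL;
	addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
				      __builtin_return_address(0));
	if (!addr)
		__iommu_dma_free(dev, pages, iosize, handle);

	/* after: allocation, IOMMU mapping and CPU remapping in one helper */
	addr = iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);

The failure unwind moves along with the remap call: if dma_common_pages_remap() fails inside the helper, the new out_unmap label tears down the IOMMU mapping before the scatterlist and IOVA are released.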

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 4c360ace
+26 −28
@@ -564,9 +564,9 @@ static struct page **__iommu_dma_get_pages(void *cpu_addr)
 }
 
 /**
- * iommu_dma_free - Free a buffer allocated by __iommu_dma_alloc()
+ * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc_remap()
  * @dev: Device which owns this buffer
- * @pages: Array of buffer pages as returned by __iommu_dma_alloc()
+ * @pages: Array of buffer pages as returned by __iommu_dma_alloc_remap()
  * @size: Size of buffer in bytes
  * @handle: DMA address of buffer
  *
@@ -582,33 +582,35 @@ static void __iommu_dma_free(struct device *dev, struct page **pages,
 }
 
 /**
- * __iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
+ * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
  * @dev: Device to allocate memory for. Must be a real device
  *	 attached to an iommu_dma_domain
  * @size: Size of buffer in bytes
+ * @dma_handle: Out argument for allocated DMA handle
  * @gfp: Allocation flags
  * @attrs: DMA attributes for this allocation
- * @prot: IOMMU mapping flags
- * @handle: Out argument for allocated DMA handle
  *
  * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
  * but an IOMMU which supports smaller pages might not map the whole thing.
  *
- * Return: Array of struct page pointers describing the buffer,
- *	   or NULL on failure.
+ * Return: Mapped virtual address, or NULL on failure.
  */
-static struct page **__iommu_dma_alloc(struct device *dev, size_t size,
-		gfp_t gfp, unsigned long attrs, int prot, dma_addr_t *handle)
+static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
+	bool coherent = dev_is_dma_coherent(dev);
+	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+	pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 	struct page **pages;
 	struct sg_table sgt;
 	dma_addr_t iova;
-	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
+	void *vaddr;
 
-	*handle = DMA_MAPPING_ERROR;
+	*dma_handle = DMA_MAPPING_ERROR;
 
 	min_size = alloc_sizes & -alloc_sizes;
 	if (min_size < PAGE_SIZE) {
@@ -634,7 +636,7 @@ static struct page **__iommu_dma_alloc(struct device *dev, size_t size,
 	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
 		goto out_free_iova;
 
-	if (!(prot & IOMMU_CACHE)) {
+	if (!(ioprot & IOMMU_CACHE)) {
 		struct scatterlist *sg;
 		int i;
 
@@ -642,14 +644,21 @@ static struct page **__iommu_dma_alloc(struct device *dev, size_t size,
 			arch_dma_prep_coherent(sg_page(sg), sg->length);
 	}
 
-	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
+	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
 			< size)
 		goto out_free_sg;
 
-	*handle = iova;
+	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+			__builtin_return_address(0));
+	if (!vaddr)
+		goto out_unmap;
+
+	*dma_handle = iova;
 	sg_free_table(&sgt);
-	return pages;
+	return vaddr;
 
+out_unmap:
+	__iommu_dma_unmap(dev, iova, size);
 out_free_sg:
 	sg_free_table(&sgt);
 out_free_iova:
@@ -1008,18 +1017,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 						    size >> PAGE_SHIFT);
 		}
 	} else {
-		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
-		struct page **pages;
-
-		pages = __iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
-					handle);
-		if (!pages)
-			return NULL;
-
-		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
-					      __builtin_return_address(0));
-		if (!addr)
-			__iommu_dma_free(dev, pages, iosize, handle);
+		addr = iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);
 	}
 	return addr;
 }
@@ -1033,7 +1031,7 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	/*
 	 * @cpu_addr will be one of 4 things depending on how it was allocated:
 	 * - A remapped array of pages for contiguous allocations.
-	 * - A remapped array of pages from __iommu_dma_alloc(), for all
+	 * - A remapped array of pages from iommu_dma_alloc_remap(), for all
 	 *   non-atomic allocations.
 	 * - A non-cacheable alias from the atomic pool, for atomic
 	 *   allocations by non-coherent devices.