
Commit 072bebc0 authored by Robin Murphy, committed by Joerg Roedel

iommu/dma: Refactor iommu_dma_alloc

Shuffle around the self-contained atomic and non-contiguous cases to
return early and get out of the way of the CMA case that we're about to
work on next.
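
The shape of the change: the atomic and non-contiguous paths each become a self-contained block that returns directly, so the body that remains is just the CMA path. A minimal sketch of that restructuring in plain C (the do_*() helpers below are hypothetical stand-ins, not kernel APIs):

#include <stdlib.h>

/* Hypothetical stand-ins for the three allocation strategies. */
static void *do_atomic_alloc(void) { return malloc(16); }
static void *do_cma_alloc(void)    { return malloc(16); }
static void *do_remap_alloc(void)  { return malloc(16); }

/* Before: all cases share one if/else chain and a single exit. */
static void *alloc_before(int blocking, int force_contiguous)
{
	void *addr;

	if (!blocking)
		addr = do_atomic_alloc();
	else if (force_contiguous)
		addr = do_cma_alloc();	/* the case about to be reworked */
	else
		addr = do_remap_alloc();
	return addr;
}

/* After: the self-contained cases return early, leaving the tail of
 * the function free to be rewritten around the CMA case alone. */
static void *alloc_after(int blocking, int force_contiguous)
{
	if (blocking && !force_contiguous)
		return do_remap_alloc();
	if (!blocking)
		return do_atomic_alloc();

	return do_cma_alloc();
}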

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
[hch: slight changes to the code flow]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent bcf4b9c4
drivers/iommu/dma-iommu.c +30 −30
@@ -973,14 +973,19 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 {
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+	pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
 	size_t iosize = size;
+	struct page *page;
 	void *addr;
 
 	size = PAGE_ALIGN(size);
 	gfp |= __GFP_ZERO;
 
+	if (gfpflags_allow_blocking(gfp) &&
+	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
+		return iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);
+
 	if (!gfpflags_allow_blocking(gfp)) {
-		struct page *page;
 		/*
 		 * In atomic context we can't remap anything, so we'll only
 		 * get the virtually contiguous buffer we need by way of a
@@ -1002,11 +1007,10 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 				__free_pages(page, get_order(size));
 			else
 				dma_free_from_pool(addr, size);
-			addr = NULL;
+			return NULL;
 		}
-	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
-		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
-		struct page *page;
+		return addr;
+	}
 
-		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-						 get_order(size), gfp & __GFP_NOWARN);
+	page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+					 get_order(size), gfp & __GFP_NOWARN);
@@ -1014,27 +1018,23 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
-		if (!page)
-			return NULL;
+	if (!page)
+		return NULL;
 
-		*handle = __iommu_dma_map(dev, page_to_phys(page), iosize, ioprot);
-		if (*handle == DMA_MAPPING_ERROR) {
-			dma_release_from_contiguous(dev, page,
-						    size >> PAGE_SHIFT);
-			return NULL;
-		}
-		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
-						   prot,
-						   __builtin_return_address(0));
-		if (addr) {
-			if (!coherent)
-				arch_dma_prep_coherent(page, iosize);
-			memset(addr, 0, size);
-		} else {
-			__iommu_dma_unmap(dev, *handle, iosize);
-			dma_release_from_contiguous(dev, page,
-						    size >> PAGE_SHIFT);
-		}
-	} else {
-		addr = iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);
-	}
+	*handle = __iommu_dma_map(dev, page_to_phys(page), iosize, ioprot);
+	if (*handle == DMA_MAPPING_ERROR)
+		goto out_free_pages;
+
+	addr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
+			__builtin_return_address(0));
+	if (!addr)
+		goto out_unmap;
+
+	if (!coherent)
+		arch_dma_prep_coherent(page, iosize);
+	memset(addr, 0, size);
 	return addr;
+out_unmap:
+	__iommu_dma_unmap(dev, *handle, iosize);
+out_free_pages:
+	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+	return NULL;
 }
 
 static int __iommu_dma_mmap_pfn(struct vm_area_struct *vma,
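
The rewritten tail also moves the CMA path onto the kernel's usual goto-unwind ladder: each acquired resource gets a cleanup label, and a failure jumps to the label that releases exactly what has been acquired so far. A small self-contained sketch of the same idiom (illustrative names and plain C, not the kernel code itself):

#include <stdlib.h>
#include <string.h>

struct ctx { void *pages; void *mapping; };

/* Mirror of the commit's error ladder: acquire pages, then a mapping;
 * a failure after either step jumps to the label that unwinds only
 * what was acquired before it, in reverse order. */
static struct ctx *ctx_create(size_t size)
{
	struct ctx *c = malloc(sizeof(*c));

	if (!c)
		return NULL;

	c->pages = malloc(size);	/* stand-in for dma_alloc_from_contiguous() */
	if (!c->pages)
		goto out_free_ctx;

	c->mapping = malloc(size);	/* stand-in for __iommu_dma_map() */
	if (!c->mapping)
		goto out_free_pages;

	memset(c->pages, 0, size);	/* like the memset() on the success path */
	return c;

out_free_pages:
	free(c->pages);
out_free_ctx:
	free(c);
	return NULL;
}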