
Commit eade5931 authored by qctecmdr, committed by Gerrit - the friendly Code Review server

Merge "dma-mapping-fast: Add support for DMA_ATTR_FORCE_CONTIGUOUS"

parents 105cc6ea f3f78a96
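
For context: DMA_ATTR_FORCE_CONTIGUOUS is the standard DMA-API attribute requesting that a buffer be backed by one physically contiguous region. With this change the fast-smmu allocator honours it by allocating from CMA and mapping the block through the SMMU as a single IOVA range. A minimal, hypothetical sketch of the driver-side usage (the device pointer, size, and helper names are placeholders, not part of this patch):

#include <linux/dma-mapping.h>

/* Hypothetical client: ask for a physically contiguous buffer that
 * is still mapped behind the device's fast SMMU. */
static void *alloc_contig_buf(struct device *dev, size_t size,
			      dma_addr_t *iova)
{
	return dma_alloc_attrs(dev, size, iova, GFP_KERNEL,
			       DMA_ATTR_FORCE_CONTIGUOUS);
}

static void free_contig_buf(struct device *dev, size_t size,
			    void *cpu_addr, dma_addr_t iova)
{
	/* Pass the same attrs so the free path releases the CMA pages. */
	dma_free_attrs(dev, size, cpu_addr, iova,
		       DMA_ATTR_FORCE_CONTIGUOUS);
}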
+1 −1
@@ -1071,7 +1071,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
	return cpu_addr;
}

-static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
+91 −50
@@ -550,7 +550,6 @@ static void *fast_smmu_alloc_atomic(struct dma_fast_smmu_mapping *mapping,
	dma_addr_t dma_addr;
	int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);

-	size = ALIGN(size, FAST_PAGE_SIZE);
	if (coherent) {
		page = alloc_pages(gfp, get_order(size));
		addr = page ? page_address(page) : NULL;
@@ -613,6 +612,54 @@ static struct page **__fast_smmu_alloc_pages(unsigned int count, gfp_t gfp)
	return pages;
}

+static void *__fast_smmu_alloc_contiguous(struct device *dev, size_t size,
+			dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+{
+	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
+	bool is_coherent = is_dma_coherent(dev, attrs);
+	int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, is_coherent, attrs);
+	pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
+	struct page *page;
+	dma_addr_t iova;
+	unsigned long flags;
+	void *coherent_addr;
+
+	page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+					get_order(size), gfp & __GFP_NOWARN);
+	if (!page)
+		return NULL;
+
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	iova = __fast_smmu_alloc_iova(mapping, attrs, size);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+	if (iova == DMA_ERROR_CODE)
+		goto release_page;
+
+	if (av8l_fast_map_public(mapping->pgtbl_ops, iova, page_to_phys(page),
+				 size, prot))
+		goto release_iova;
+
+	coherent_addr = dma_common_contiguous_remap(page, size, remap_prot,
+						__fast_smmu_alloc_contiguous);
+	if (!coherent_addr)
+		goto release_mapping;
+
+	if (!is_coherent)
+		__dma_flush_area(page_to_virt(page), size);
+
+	*handle = iova;
+	return coherent_addr;
+
+release_mapping:
+	av8l_fast_unmap_public(mapping->pgtbl_ops, iova, size);
+release_iova:
+	__fast_smmu_free_iova(mapping, iova, size);
+release_page:
+	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+	return NULL;
+}
+
static void *fast_smmu_alloc(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp,
			     unsigned long attrs)
@@ -640,11 +687,14 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
	}

	*handle = DMA_ERROR_CODE;
+	size = ALIGN(size, SZ_4K);

-	if (!gfpflags_allow_blocking(gfp)) {
+	if (!gfpflags_allow_blocking(gfp))
		return fast_smmu_alloc_atomic(mapping, size, gfp, attrs, handle,
					      is_coherent);
-	}
+	else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
+		return __fast_smmu_alloc_contiguous(dev, size, handle, gfp,
+						    attrs);

	pages = __fast_smmu_alloc_pages(count, gfp);
	if (!pages) {
@@ -652,7 +702,6 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
		return NULL;
	}

-	size = ALIGN(size, SZ_4K);
	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, gfp)) {
		dev_err(dev, "no sg tablen\n");
		goto out_free_pages;
@@ -720,52 +769,47 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
}

static void fast_smmu_free(struct device *dev, size_t size,
-			   void *vaddr, dma_addr_t dma_handle,
+			   void *cpu_addr, dma_addr_t dma_handle,
			   unsigned long attrs)
{
	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
	struct vm_struct *area;
-	struct page **pages = NULL;
-	size_t count = ALIGN(size, SZ_4K) >> FAST_PAGE_SHIFT;
	unsigned long flags;

-	size = ALIGN(size, SZ_4K);
-
-	if (fast_dma_in_atomic_pool(vaddr, size) || !is_vmalloc_addr(vaddr))
-		goto no_remap;
-
-	area = find_vm_area(vaddr);
-	if (WARN_ON_ONCE(!area))
-		return;
+	size = ALIGN(size, FAST_PAGE_SIZE);
+
-	pages = area->pages;
-	dma_common_free_remap(vaddr, size);
-no_remap:
	spin_lock_irqsave(&mapping->lock, flags);
	av8l_fast_unmap_public(mapping->pgtbl_ops, dma_handle, size);
	__fast_smmu_free_iova(mapping, dma_handle, size);
	spin_unlock_irqrestore(&mapping->lock, flags);
-	if (fast_dma_in_atomic_pool(vaddr, size))
-		fast_dma_free_from_pool(vaddr, size);
-	else if (is_vmalloc_addr(vaddr))
-		__fast_smmu_free_pages(pages, count);
-	else
-		__free_pages(virt_to_page(vaddr), get_order(size));

+	area = find_vm_area(cpu_addr);
+	if (area && area->pages) {
+		struct page **pages = area->pages;
+
+		dma_common_free_remap(cpu_addr, size);
+		__fast_smmu_free_pages(pages, size >> FAST_PAGE_SHIFT);
+	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+		struct page *page = vmalloc_to_page(cpu_addr);
+
+		dma_common_free_remap(cpu_addr, size);
+		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+	} else if (!is_vmalloc_addr(cpu_addr)) {
+		__free_pages(virt_to_page(cpu_addr), get_order(size));
+	} else if (fast_dma_in_atomic_pool(cpu_addr, size)) {
+		// Keep remap
+		fast_dma_free_from_pool(cpu_addr, size);
+	}
}

-static int __vma_remap_range(struct vm_area_struct *vma, void *cpu_addr,
+/* __swiotlb_mmap_pfn is not currently exported. */
+static int fast_smmu_mmap_pfn(struct vm_area_struct *vma, unsigned long pfn,
			     size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
-	unsigned long pfn;
-
-	if (fast_dma_in_atomic_pool(cpu_addr, size))
-		pfn = fast_dma_get_phys(cpu_addr) >> PAGE_SHIFT;
-	else
-		pfn = page_to_pfn(virt_to_page(cpu_addr));

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start, pfn + off,
@@ -781,32 +825,27 @@ static int fast_smmu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
				size_t size, unsigned long attrs)
{
	struct vm_struct *area;
-	unsigned long uaddr = vma->vm_start;
-	struct page **pages;
-	int i, nr_pages, ret = 0;
	bool coherent = is_dma_coherent(dev, attrs);
+	unsigned long pfn = 0;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     coherent);

-	if (fast_dma_in_atomic_pool(cpu_addr, size) ||
-	    !is_vmalloc_addr(cpu_addr))
-		return __vma_remap_range(vma, cpu_addr, size);
-
	area = find_vm_area(cpu_addr);
-	if (!area)
-		return -EINVAL;
+	if (area && area->pages)
+		return iommu_dma_mmap(dev, vma, cpu_addr, dma_addr, size,
+				      attrs);
+	else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
+		pfn = vmalloc_to_pfn(cpu_addr);
+	else if (!is_vmalloc_addr(cpu_addr))
+		pfn = page_to_pfn(virt_to_page(cpu_addr));
+	else if (fast_dma_in_atomic_pool(cpu_addr, size))
+		pfn = fast_dma_get_phys(cpu_addr) >> PAGE_SHIFT;

-	pages = area->pages;
-	nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	for (i = vma->vm_pgoff; i < nr_pages && uaddr < vma->vm_end; i++) {
-		ret = vm_insert_page(vma, uaddr, pages[i]);
-		if (ret)
-			break;
-		uaddr += PAGE_SIZE;
-	}
-
-	return ret;
+	if (pfn)
+		return fast_smmu_mmap_pfn(vma, pfn, size);
+
+	return -EINVAL;
}

static int fast_smmu_get_sgtable(struct device *dev, struct sg_table *sgt,
@@ -822,6 +861,8 @@ static int fast_smmu_get_sgtable(struct device *dev, struct sg_table *sgt,
	if (area && area->pages)
		return sg_alloc_table_from_pages(sgt, area->pages, n_pages, 0,
						 size, GFP_KERNEL);
+	else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
+		page = vmalloc_to_page(cpu_addr);
	else if (!is_vmalloc_addr(cpu_addr))
		page = virt_to_page(cpu_addr);
	else if (fast_dma_in_atomic_pool(cpu_addr, size))
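
The reworked fast_smmu_mmap_attrs() above resolves every case that is not a vmalloc'd page array down to a single pfn and hands it to fast_smmu_mmap_pfn(). A hedged sketch of how a driver's mmap handler would exercise this path (struct contig_buf and its fields are illustrative, not from this patch):

/* Hypothetical fops->mmap handler: dma_mmap_attrs() ends up in
 * fast_smmu_mmap_attrs() for devices attached to a fast-smmu mapping. */
struct contig_buf {
	struct device *dev;
	void *cpu_addr;
	dma_addr_t iova;
	size_t size;
};

static int contig_buf_mmap(struct contig_buf *buf,
			   struct vm_area_struct *vma)
{
	return dma_mmap_attrs(buf->dev, vma, buf->cpu_addr, buf->iova,
			      buf->size, DMA_ATTR_FORCE_CONTIGUOUS);
}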
+3 −0
@@ -27,6 +27,9 @@ size_t iommu_dma_prepare_map_sg(struct device *dev, struct iova_domain *iovad,
int iommu_dma_finalise_sg(struct device *dev, struct scatterlist *sg,
		int nents, dma_addr_t dma_addr);
void iommu_dma_invalidate_sg(struct scatterlist *sg, int nents);
+int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+		   void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		   unsigned long attrs);

/* Setup call for arch DMA mapping code */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);