Commit 8ca6d749 authored by qctecmdr, committed by Gerrit Code Review

Merge "arm64/iommu: handle atomic pool addresses in ->get_sgtable and ->mmap"

Parents: 93f03d2a dca4c7f4
+26 −30
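This merge reworks __iommu_mmap_attrs() and __iommu_get_sgtable() in the arm64 DMA-mapping code. Previously, a vmalloc-space address was translated to a pfn or page only when DMA_ATTR_FORCE_CONTIGUOUS was set; buffers from the atomic DMA pool are likewise remapped into vmalloc space but carry no such attribute, so they fell through to the find_vm_area() fallback, which warns and returns -ENXIO when the area has no page array. After the merge, both helpers resolve the backing memory in the same order: a vm_struct with a page array is used directly, a linear-map address yields its page via virt_to_page(), and any remaining vmalloc-range address, whether DMA_ATTR_FORCE_CONTIGUOUS or atomic pool, is translated with vmalloc_to_page()/vmalloc_to_pfn().

A minimal user-space sketch of that dispatch order, with the kernel types and helpers reduced to hypothetical stand-ins (this illustrates the branch structure only; it is not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct vm_struct. */
struct vm_struct {
	void **pages;	/* page array, set for regular IOMMU allocations */
};

/*
 * Mirrors the branch order the merged code gives __iommu_mmap_attrs()
 * and __iommu_get_sgtable():
 *   1. vm area with a page array  -> iommu_dma_mmap() /
 *                                    sg_alloc_table_from_pages()
 *   2. linear-map address         -> virt_to_page() / page_to_pfn()
 *   3. other vmalloc-range address (DMA_ATTR_FORCE_CONTIGUOUS or
 *      atomic pool)               -> vmalloc_to_page() / vmalloc_to_pfn()
 */
static const char *resolve_backing(const struct vm_struct *area,
				   bool is_vmalloc)
{
	if (area && area->pages)
		return "area->pages";
	else if (!is_vmalloc)
		return "virt_to_page()/page_to_pfn()";
	else
		return "vmalloc_to_page()/vmalloc_to_pfn()";
}

int main(void)
{
	void *pages[1] = { NULL };
	struct vm_struct iommu_alloc = { .pages = pages };
	struct vm_struct remapped = { .pages = NULL };

	/* Regular IOMMU allocation: backed by a page array. */
	printf("%s\n", resolve_backing(&iommu_alloc, true));
	/* Non-remapped buffer in the linear map: no vm area at all. */
	printf("%s\n", resolve_backing(NULL, false));
	/* Atomic pool or FORCE_CONTIGUOUS buffer: remapped, no page array. */
	printf("%s\n", resolve_backing(&remapped, true));
	return 0;
}

In the real functions the third branch is guarded by is_vmalloc_addr(), and a pfn or page that is still 0/NULL after all three branches produces the -ENXIO return visible at the end of each hunk below.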
@@ -791,6 +791,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 {
 	struct vm_struct *area;
 	int ret;
+	unsigned long pfn = 0;
 
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
 					     is_dma_coherent(dev, attrs));
@@ -798,25 +799,23 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
-	if (!is_vmalloc_addr(cpu_addr)) {
-		unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
-		return __swiotlb_mmap_pfn(vma, pfn, size);
-	}
+	area = find_vm_area(cpu_addr);
 
-	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+	if (area && area->pages)
+		return iommu_dma_mmap(area->pages, size, vma);
+	else if (!is_vmalloc_addr(cpu_addr))
+		pfn = page_to_pfn(virt_to_page(cpu_addr));
+	else if (is_vmalloc_addr(cpu_addr))
 		/*
-		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
-		 * hence in the vmalloc space.
+		 * DMA_ATTR_FORCE_CONTIGUOUS and atomic pool allocations are
+		 * always remapped, hence in the vmalloc space.
 		 */
-		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
+		pfn = vmalloc_to_pfn(cpu_addr);
+
+	if (pfn)
 		return __swiotlb_mmap_pfn(vma, pfn, size);
-	}
 
-	area = find_vm_area(cpu_addr);
-	if (WARN_ON(!area || !area->pages))
 	return -ENXIO;
-
-	return iommu_dma_mmap(area->pages, size, vma);
 }
 
 static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
@@ -824,27 +823,24 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 			       size_t size, unsigned long attrs)
 {
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct page *page = NULL;
 	struct vm_struct *area = find_vm_area(cpu_addr);
 
-	if (!is_vmalloc_addr(cpu_addr)) {
-		struct page *page = virt_to_page(cpu_addr);
-		return __swiotlb_get_sgtable_page(sgt, page, size);
-	}
-
-	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+	if (area && area->pages)
+		return sg_alloc_table_from_pages(sgt, area->pages, count, 0,
+					size, GFP_KERNEL);
+	else if (!is_vmalloc_addr(cpu_addr))
+		page = virt_to_page(cpu_addr);
+	else if (is_vmalloc_addr(cpu_addr))
 		/*
-		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
-		 * hence in the vmalloc space.
+		 * DMA_ATTR_FORCE_CONTIGUOUS and atomic pool allocations
+		 * are always remapped, hence in the vmalloc space.
 		 */
-		struct page *page = vmalloc_to_page(cpu_addr);
-		return __swiotlb_get_sgtable_page(sgt, page, size);
-	}
+		page = vmalloc_to_page(cpu_addr);
 
-	if (WARN_ON(!area || !area->pages))
+	if (page)
+		return __swiotlb_get_sgtable_page(sgt, page, size);
 	return -ENXIO;
-
-	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
-					 GFP_KERNEL);
 }
 
 static void __iommu_sync_single_for_cpu(struct device *dev,