Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 549a17e4 authored by Marek Szyprowski, committed by Inki Dae
Browse files

ARM: dma-mapping: add support for DMA_ATTR_FORCE_CONTIGUOUS attribute



This patch adds support for the DMA_ATTR_FORCE_CONTIGUOUS attribute for
dma_alloc_attrs() in the IOMMU-aware implementation. For allocating physically
contiguous buffers, the Contiguous Memory Allocator (CMA) is used.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
parent 4b9347dc
Loading
Loading
Loading
Loading
+33 −8
Original line number Original line Diff line number Diff line
@@ -1036,7 +1036,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
	spin_unlock_irqrestore(&mapping->lock, flags);
	spin_unlock_irqrestore(&mapping->lock, flags);
}
}


static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, struct dma_attrs *attrs)
{
{
	struct page **pages;
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int count = size >> PAGE_SHIFT;
@@ -1050,6 +1051,23 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
	if (!pages)
	if (!pages)
		return NULL;
		return NULL;


	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
	{
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	while (count) {
	while (count) {
		int j, order = __fls(count);
		int j, order = __fls(count);


@@ -1083,14 +1101,21 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
	return NULL;
	return NULL;
}
}


static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, struct dma_attrs *attrs)
{
{
	int count = size >> PAGE_SHIFT;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int array_size = count * sizeof(struct page *);
	int i;
	int i;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
		for (i = 0; i < count; i++)
			if (pages[i])
			if (pages[i])
				__free_pages(pages[i], 0);
				__free_pages(pages[i], 0);
	}

	if (array_size <= PAGE_SIZE)
	if (array_size <= PAGE_SIZE)
		kfree(pages);
		kfree(pages);
	else
	else
@@ -1252,7 +1277,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	if (gfp & GFP_ATOMIC)
	if (gfp & GFP_ATOMIC)
		return __iommu_alloc_atomic(dev, size, handle);
		return __iommu_alloc_atomic(dev, size, handle);


	pages = __iommu_alloc_buffer(dev, size, gfp);
	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
	if (!pages)
	if (!pages)
		return NULL;
		return NULL;


@@ -1273,7 +1298,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
err_mapping:
err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
err_buffer:
	__iommu_free_buffer(dev, pages, size);
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
	return NULL;
}
}


@@ -1329,7 +1354,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
	}
	}


	__iommu_remove_mapping(dev, handle, size);
	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}
}


static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,