Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 21caf3a7 authored by Lorenzo Nava, committed by Russell King
Browse files

ARM: 8398/1: arm DMA: Fix allocation from CMA for coherent DMA



This patch allows the use of CMA for DMA coherent memory allocation.
At the moment if the input parameter "is_coherent" is set to true
the allocation is not made using the CMA, which I think is not the
desired behaviour.
The patch covers the allocation and free of memory for coherent
DMA.

Signed-off-by: Lorenzo Nava <lorenx4@gmail.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 1234e3fd
Loading
Loading
Loading
Loading
+12 −9
Original line number Diff line number Diff line
@@ -649,14 +649,18 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
	size = PAGE_ALIGN(size);
	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

	if (is_coherent || nommu())
	if (nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
		addr = __alloc_from_contiguous(dev, size, prot, &page,
					       caller, want_vaddr);
	else if (is_coherent)
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!(gfp & __GFP_WAIT))
		addr = __alloc_from_pool(size, &page);
	else if (!dev_get_cma_area(dev))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
					    caller, want_vaddr);

	if (page)
		*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -684,13 +688,12 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
/*
 * arm_coherent_dma_alloc - allocate a buffer for coherent DMA.
 *
 * NOTE(review): the scraped diff rendered both the pre-patch return
 * ("... gfp, prot, true, ...") and the post-patch return
 * ("... gfp, PAGE_KERNEL, true, ...") back to back, leaving an
 * unreachable second return and an unused 'prot' local. Reconstructed
 * below as the post-patch (ARM: 8398/1) version of the function.
 *
 * @dev:    device performing the DMA (may carry a per-device CMA area)
 * @size:   requested buffer size in bytes
 * @handle: out-param receiving the bus/DMA address of the buffer
 * @gfp:    allocation flags
 * @attrs:  DMA attributes (e.g. DMA_ATTR_NO_KERNEL_MAPPING)
 *
 * Returns the kernel virtual address of the buffer, or NULL on failure.
 */
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *memory;

	/* A per-device coherent pool, if present, satisfies the request first. */
	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	/*
	 * Coherent path: no remapped page protection is needed, so
	 * PAGE_KERNEL is passed directly and the former
	 * __get_dma_pgprot() local is dropped. is_coherent = true lets
	 * __dma_alloc route the request through CMA when available.
	 */
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
}

@@ -754,12 +757,12 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,

	size = PAGE_ALIGN(size);

	if (is_coherent || nommu()) {
	if (nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
	} else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
		return;
	} else if (!dev_get_cma_area(dev)) {
		if (want_vaddr)
		if (want_vaddr && !is_coherent)
			__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {