Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e0c84257 authored by Patrick Daly's avatar Patrick Daly Committed by Gerrit - the friendly Code Review server
Browse files

arm: dma: Allow CMA pages to not have a kernel mapping



There are use cases that require CMA memory to have no kernel mapping:
when the CMA region is used as a pool that can hold both cached and
uncached mappings, the kernel's own mapping must be removed to avoid
the multiple-mapping (cache aliasing) problem. Extend the DMA APIs to
honour DMA_ATTR_NO_KERNEL_MAPPING for CMA allocations. For lowmem pages
this doesn't save any virtual address space (the linear map region is
still reserved), but the kernel mapping itself is cleared.

Change-Id: I64d21250abbe615c43e2b5b1272ee2b6d106705a
Signed-off-by: default avatarLaura Abbott <lauraa@codeaurora.org>
Signed-off-by: default avatarPatrick Daly <pdaly@codeaurora.org>
Signed-off-by: default avatarVijayanand Jitta <vjitta@codeaurora.org>
Signed-off-by: default avatarCharan Teja Reddy <charante@codeaurora.org>
Signed-off-by: default avatarQingqing Zhou <qqzhou@codeaurora.org>
parent f898a884
Loading
Loading
Loading
Loading
+23 −13
Original line number Diff line number Diff line
@@ -526,12 +526,28 @@ static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
/*
 * apply_to_page_range() callback: remove the kernel linear-map entry
 * for one PTE (used for DMA_ATTR_NO_KERNEL_MAPPING CMA buffers).
 * @token and @data are unused; the signature is dictated by the caller.
 */
static int __dma_clear_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	pte_clear(&init_mm, addr, pte);

	return 0;
}

/*
 * Rewrite the kernel linear-map entries covering a lowmem CMA buffer.
 *
 * When @want_vaddr is true the PTEs are updated to @prot; when false they
 * are cleared entirely, leaving the buffer with no kernel mapping at all
 * (DMA_ATTR_NO_KERNEL_MAPPING) so that later cached/uncached aliases of
 * the pool cannot conflict with a stale kernel mapping.
 */
static void __dma_remap(struct page *page, size_t size, pgprot_t prot,
			bool want_vaddr)
{
	unsigned long start = (unsigned long) page_address(page);
	/* was plain "unsigned": would truncate start + size on LP64 and
	 * mismatches flush_tlb_kernel_range()'s unsigned long arguments */
	unsigned long end = start + size;
	int (*func)(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data);

	if (!want_vaddr)
		func = __dma_clear_pte;
	else
		func = __dma_update_pte;

	apply_to_page_range(&init_mm, start, size, func, &prot);
	mb(); /* Ensure pte's are updated */
	flush_tlb_kernel_range(start, end);
}

@@ -614,9 +630,6 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,

	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
@@ -624,11 +637,10 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		__dma_remap(page, size, prot, want_vaddr);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}
@@ -636,12 +648,10 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
/*
 * Free a buffer obtained from __alloc_from_contiguous().
 *
 * Before handing the pages back to CMA, restore the default cacheable
 * kernel mapping: highmem buffers drop their remapped virtual region,
 * lowmem buffers have their linear-map PTEs rewritten to PAGE_KERNEL.
 * Passing want_vaddr=true to __dma_remap() re-establishes the mapping
 * even if it was removed at allocation time via
 * DMA_ATTR_NO_KERNEL_MAPPING; @want_vaddr is kept for interface
 * compatibility with the other free paths.
 */
static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (PageHighMem(page))
		__dma_free_remap(cpu_addr, size, true);
	else
		__dma_remap(page, size, PAGE_KERNEL, true);
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}