Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9c120d5b authored by Laura Abbott, committed by Matt Wagantall
Browse files

ARM: dma-mapping: Allow highmem pages to not have a mapping



The DMA_ATTR_NO_KERNEL_MAPPING is used to make sure that CMA
pages have no kernel mapping. Add support to make sure that
highmem pages have no mapping.

Change-Id: Ife76df126ecfedf0dba81a35e0de8a1787355b3d
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Rohit Vaswani <rvaswani@codeaurora.org>
parent 7b3555b5
Loading
Loading
Loading
Loading
+22 −11
Original line number Diff line number Diff line
@@ -326,10 +326,10 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
 			prot, caller);
 }
 
-static void __dma_free_remap(void *cpu_addr, size_t size)
+static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn)
 {
 	dma_common_free_remap(cpu_addr, size,
-			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP, no_warn);
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
@@ -374,8 +374,8 @@ static int __init atomic_pool_init(void)
 		goto out;
 
 	if (IS_ENABLED(CONFIG_DMA_CMA))
-		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
-					      atomic_pool_init, false);
+		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
+				&page, atomic_pool_init, false);
 	else
 		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
 					   &page, atomic_pool_init);
@@ -555,6 +555,7 @@ static int __free_from_pool(void *start, size_t size)
 	return 1;
 }
 
+#define NO_KERNEL_MAPPING_DUMMY	0x2222
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
 				     const void *caller,
@@ -572,11 +573,21 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
 	__dma_clear_buffer(page, size);
 
 	if (PageHighMem(page)) {
-		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
-		if (!ptr) {
-			dma_release_from_contiguous(dev, page, count);
-			return NULL;
-		}
+		if (no_kernel_mapping) {
+			/*
+			 * Something non-NULL needs to be returned here. Give
+			 * back a dummy address that is unmapped to catch
+			 * clients trying to use the address incorrectly
+			 */
+			ptr = (void *)NO_KERNEL_MAPPING_DUMMY;
+		} else {
+			ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot,
+						caller);
+			if (!ptr) {
+				dma_release_from_contiguous(dev, page, count);
+				return NULL;
+			}
+		}
 	} else {
 		__dma_remap(page, size, prot, no_kernel_mapping);
 		ptr = page_address(page);
@@ -589,7 +600,7 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
 				   void *cpu_addr, size_t size)
 {
 	if (PageHighMem(page))
-		__dma_free_remap(cpu_addr, size);
+		__dma_free_remap(cpu_addr, size, true);
 	else
 		__dma_remap(page, size, pgprot_kernel, false);
 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
@@ -620,7 +631,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 #define __alloc_from_contiguous(dev, size, prot, ret, c, w)	NULL
 #define __free_from_pool(cpu_addr, size)			0
 #define __free_from_contiguous(dev, page, cpu_addr, size)	do { } while (0)
-#define __dma_free_remap(cpu_addr, size)			do { } while (0)
+#define __dma_free_remap(cpu_addr, size, w)			do { } while (0)
 
 #endif	/* CONFIG_MMU */
 
@@ -802,7 +813,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	} else if (__free_from_pool(cpu_addr, size)) {
 		return;
 	} else if (!dev_get_cma_area(dev)) {
-		__dma_free_remap(cpu_addr, size);
+		__dma_free_remap(cpu_addr, size, false);
 		__dma_free_buffer(page, size);
 	} else {
 		/*