
Commit 0eade166 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "arm: Skip DMA zeroing completely only if specified"

parents 075902fd 702f40a1
+24 −18
@@ -238,7 +238,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
	return mask;
}

-static void __dma_clear_buffer(struct page *page, size_t size)
+static void __dma_clear_buffer(struct page *page, size_t size,
+					struct dma_attrs *attrs)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
@@ -249,6 +250,7 @@ static void __dma_clear_buffer(struct page *page, size_t size)
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
+			if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
				memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
@@ -258,6 +260,7 @@ static void __dma_clear_buffer(struct page *page, size_t size)
		outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
+		if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
			memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
@@ -284,7 +287,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

-	__dma_clear_buffer(page, size);
+	__dma_clear_buffer(page, size, NULL);

	return page;
}
@@ -307,7 +310,7 @@ static void __dma_free_buffer(struct page *page, size_t size)
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller,
-				     bool no_kernel_mapping);
+				     struct dma_attrs *attrs);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
@@ -375,7 +378,7 @@ static int __init atomic_pool_init(void)

	if (IS_ENABLED(CONFIG_DMA_CMA))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
				&page, atomic_pool_init, false);
				&page, atomic_pool_init, NULL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init);
@@ -559,18 +562,25 @@ static int __free_from_pool(void *start, size_t size)
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller,
-				     bool no_kernel_mapping)
+				     struct dma_attrs *attrs)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr;
+	bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
+							attrs);

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

-	__dma_clear_buffer(page, size);
+	/*
+	 * skip completely if we neither need to zero nor sync.
+	 */
+	if (!(dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs) &&
+	      dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs)))
+		__dma_clear_buffer(page, size, attrs);

	if (PageHighMem(page)) {
		if (no_kernel_mapping) {
@@ -655,7 +665,7 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
-			 const void *caller, bool no_kernel_mapping)
+			 const void *caller, struct dma_attrs *attrs)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
@@ -696,7 +706,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page, caller,
-						no_kernel_mapping);
+						attrs);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -713,14 +723,12 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;
-	bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
-					attrs);

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, false,
-			   __builtin_return_address(0), no_kernel_mapping);
+			   __builtin_return_address(0), attrs);
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
@@ -728,14 +736,12 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;
-	bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
-					attrs);

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, true,
-			   __builtin_return_address(0), no_kernel_mapping);
+			   __builtin_return_address(0), attrs);
}

/*
@@ -1212,7 +1218,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
		if (!page)
			goto error;

-		__dma_clear_buffer(page, size);
+		__dma_clear_buffer(page, size, NULL);

		for (i = 0; i < count; i++)
			pages[i] = page + i;
@@ -1241,7 +1247,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
		__dma_clear_buffer(pages[i], PAGE_SIZE << order, NULL);
		i += 1 << order;
		count -= 1 << order;
	}
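
For context, dma_get_attr() in kernels of this vintage treats a NULL attrs pointer as "no attributes set", which is why the call sites above that pass NULL, the atomic pool and the IOMMU allocator, retain the old unconditional zeroing. Quoted for reference from include/linux/dma-attrs.h, not part of this diff:

	static inline int dma_get_attr(enum dma_attrs attr, struct dma_attrs *attrs)
	{
		if (attrs == NULL)
			return 0;
		BUG_ON(attr >= DMA_ATTR_MAX);
		return test_bit(attr, attrs->flags);
	}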