Commit 7a3f06cd authored by Taniya Das

arm: Skip DMA zeroing completely only if specified

With the DMA attribute DMA_ATTR_SKIP_ZEROING, a DMA allocation skips the
memset and also the cache flush operations. This leaves dirty lines in
the caches which, when evicted later, can corrupt the allocated buffer.
For some clients this may be acceptable, but other clients still need
the actual sync.

Switch the code to skip __dma_clear_buffer() only if both
DMA_ATTR_SKIP_ZEROING and DMA_ATTR_SKIP_CPU_SYNC are specified. If only
DMA_ATTR_SKIP_ZEROING is specified, skip just the zeroing but still
perform the cache operations.

Change-Id: I81dd6bbeed7ec7f1dce28cd8d0aba7c8900ec550
Signed-off-by: Taniya Das <tdas@codeaurora.org>
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
parent 61b52bcb
arch/arm/mm/dma-mapping.c  +15 −8
@@ -219,7 +219,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
 	return mask;
 }
 
-static void __dma_clear_buffer(struct page *page, size_t size)
+static void __dma_clear_buffer(struct page *page, size_t size,
+					struct dma_attrs *attrs)
 {
 	/*
 	 * Ensure that the allocated pages are zeroed, and that any data
@@ -230,6 +231,7 @@ static void __dma_clear_buffer(struct page *page, size_t size)
 		phys_addr_t end = base + size;
 		while (size > 0) {
 			void *ptr = kmap_atomic(page);
-			memset(ptr, 0, PAGE_SIZE);
+			if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
+				memset(ptr, 0, PAGE_SIZE);
 			dmac_flush_range(ptr, ptr + PAGE_SIZE);
 			kunmap_atomic(ptr);
@@ -239,6 +241,7 @@ static void __dma_clear_buffer(struct page *page, size_t size)
 		outer_flush_range(base, end);
 	} else {
 		void *ptr = page_address(page);
-		memset(ptr, 0, size);
+		if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
+			memset(ptr, 0, size);
 		dmac_flush_range(ptr, ptr + size);
 		outer_flush_range(__pa(ptr), __pa(ptr) + size);
@@ -265,7 +268,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
 	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
 		__free_page(p);
 
-	__dma_clear_buffer(page, size);
+	__dma_clear_buffer(page, size, NULL);
 
 	return page;
 }
@@ -627,8 +630,12 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
 
 	page = pfn_to_page(pfn);
 
-	if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
-		__dma_clear_buffer(page, size);
+	/*
+	 * skip completely if we neither need to zero nor sync.
+	 */
+	if (!(dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs) &&
+	      dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs)))
+		__dma_clear_buffer(page, size, attrs);
 
 	if (PageHighMem(page)) {
 		if (no_kernel_mapping) {
@@ -1170,7 +1177,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 
 		page = pfn_to_page(pfn);
 
-		__dma_clear_buffer(page, size);
+		__dma_clear_buffer(page, size, NULL);
 
 		for (i = 0; i < count; i++)
 			pages[i] = page + i;
@@ -1199,7 +1206,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 				pages[i + j] = pages[i] + j;
 		}
 
-		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
+		__dma_clear_buffer(pages[i], PAGE_SIZE << order, NULL);
 		i += 1 << order;
 		count -= 1 << order;
 	}
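As an editorial summary (not part of the patch), the attribute
combinations on the contiguous (attrs-aware) path now behave as follows:

  SKIP_ZEROING  SKIP_CPU_SYNC  __dma_clear_buffer()      effect
  unset         unset          called                    memset + cache flush
  set           unset          called, memset skipped    cache flush only
  unset         set            called                    memset + cache flush
  set           set            skipped entirely          caller does its own sync

The __dma_alloc_buffer() and __iommu_alloc_buffer() paths pass NULL for
attrs and therefore always zero and flush.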