Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8de9d3a1 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server
Browse files

Merge "arm64: dma-mapping: fix aliasing issues with non-CMA alloc"

parents f61cd8b8 963f5b98
Loading
Loading
Loading
Loading
+14 −13
Original line number | Diff line number | Diff line
@@ -157,12 +157,13 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  unsigned long attrs)
{
	void *addr;

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), flags);
@@ -172,20 +173,20 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
	} else {
		addr = swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}

		if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) ||
		    (attrs & DMA_ATTR_STRONGLY_ORDERED)) {
	if (addr && ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) ||
			(attrs & DMA_ATTR_STRONGLY_ORDERED))) {
		/*
		 * flush the caches here because we can't later
		 */
		__dma_flush_area(addr, size);
			__dma_remap(page, size, __pgprot(0), true);
		__dma_remap(virt_to_page(addr), size, __pgprot(0), true);
	}

	return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

static void __dma_free_coherent(struct device *dev, size_t size,