Commit 6b700393 authored by Vineet Gupta

ARC: dma: non-coherent pages need V-P mapping if in HIGHMEM



Previously, a non-coherent page (due to hardware IOC, or simply because the
driver asked for one) could be handled by the CPU with its paddr alone; a
kvaddr was only needed for coherent mappings, to enforce uncached semantics
via an MMU mapping.

Now, however, such a page might still require a V-P mapping if it sits in
physical address space above 32 bits due to PAE40, since the CPU can't
access it directly with a paddr.

So decouple the decision to allocate a kvaddr from the type of alloc
request (coherent vs. non-coherent).
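
In essence, the alloc path now makes two independent decisions, sketched
below in the patch's own terms (condensed from the diff that follows;
error handling and cache maintenance elided):

	int need_coh = 1, need_kvaddr = 0;

	/* Decision 1: enforce uncached semantics? Not needed if hardware
	 * IOC keeps the data coherent anyway, or the driver explicitly
	 * asked for a non-consistent mapping. */
	if ((is_isa_arcv2() && ioc_exists) ||
	    dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
		need_coh = 0;

	/* Decision 2 (now independent): does the CPU need a kernel virtual
	 * mapping at all? Yes if uncached semantics must be enforced via
	 * the MMU, or if the page is HIGHMEM (e.g. physically above 32 bits
	 * with PAE40) and hence has no linear kernel address. */
	if (PageHighMem(page) || need_coh)
		need_kvaddr = 1;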

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent d98a15a5
+32 −15
@@ -28,23 +28,18 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 	struct page *page;
 	phys_addr_t paddr;
 	void *kvaddr;
+	int need_coh = 1, need_kvaddr = 0;
 
 	page = alloc_pages(gfp, order);
 	if (!page)
 		return NULL;
 
-	/* This is linear addr (0x8000_0000 based) */
-	paddr = page_to_phys(page);
-
-	/* For now bus address is exactly same as paddr */
-	*dma_handle = paddr;
-
 	/*
 	 * IOC relies on all data (even coherent DMA data) being in cache
 	 * Thus allocate normal cached memory
 	 *
 	 * The gains with IOC are two pronged:
-	 *   -For streaming data, elides needs for cache maintenance, saving
+	 *   -For streaming data, elides need for cache maintenance, saving
 	 *    cycles in flush code, and bus bandwidth as all the lines of a
 	 *    buffer need to be flushed out to memory
 	 *   -For coherent data, Read/Write to buffers terminate early in cache
@@ -52,14 +47,32 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 	 */
 	if ((is_isa_arcv2() && ioc_exists) ||
 	    dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
-		return paddr;
+		need_coh = 0;
 
+	/*
+	 * - A coherent buffer needs MMU mapping to enforce non-cachability
+	 * - A highmem page needs a virtual handle (hence MMU mapping)
+	 *   independent of cachability
+	 */
+	if (PageHighMem(page) || need_coh)
+		need_kvaddr = 1;
+
+	/* This is linear addr (0x8000_0000 based) */
+	paddr = page_to_phys(page);
+
+	/* For now bus address is exactly same as paddr */
+	*dma_handle = paddr;
+
 	/* This is kernel Virtual address (0x7000_0000 based) */
+	if (need_kvaddr) {
 		kvaddr = ioremap_nocache((unsigned long)paddr, size);
 		if (kvaddr == NULL) {
 			__free_pages(page, order);
 			return NULL;
 		}
+	} else {
+		kvaddr = (void *)paddr;
+	}
 
 	/*
 	 * Evict any existing L1 and/or L2 lines for the backing page
@@ -71,6 +84,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 	 * Currently flush_cache_vmap nukes the L1 cache completely which
 	 * will be optimized as a separate commit
 	 */
+	if (need_coh)
 		dma_cache_wback_inv((unsigned long)paddr, size);
 
 	return kvaddr;
@@ -80,9 +94,12 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	struct page *page = virt_to_page(dma_handle);
+	int is_non_coh = 1;
+
+	is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
+			(is_isa_arcv2() && ioc_exists);
 
-	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) &&
-	    !(is_isa_arcv2() && ioc_exists))
+	if (PageHighMem(page) || !is_non_coh)
 		iounmap((void __force __iomem *)vaddr);
 
 	__free_pages(page, get_order(size));
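
Note the symmetry on the free side: arc_dma_free() recomputes the same
predicate rather than storing it at alloc time, so iounmap() runs exactly
in the cases where arc_dma_alloc() went through ioremap_nocache(),
condensed here for illustration:

	/* unmap only if alloc had created a kvaddr: a HIGHMEM page, or a
	 * coherent buffer that needed an uncached MMU mapping */
	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);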