Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2c1de929 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Stafford Horne
Browse files

openrisc: fix cache maintenance in the sync_single_for_device DMA operation



The cache maintenance in the sync_single_for_device operation should be
equivalent to the map_page operation to facilitate reusing buffers.  Fix the
openrisc implementation by moving the cache maintenance performed in map_page
into the sync_single method, and calling that from map_page.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Stafford Horne <shorne@gmail.com>
parent dcc9c919
Loading
Loading
Loading
Loading
+19 −23
Original line number Diff line number Diff line
@@ -133,19 +133,15 @@ or1k_dma_free(struct device *dev, size_t size, void *vaddr,
	free_pages_exact(vaddr, size);
}

static dma_addr_t
or1k_map_page(struct device *dev, struct page *page,
	      unsigned long offset, size_t size,
	      enum dma_data_direction dir,
	      unsigned long attrs)
static void
or1k_sync_single_for_device(struct device *dev,
			    dma_addr_t dma_handle, size_t size,
			    enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = page_to_phys(page) + offset;
	dma_addr_t addr = dma_handle;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return addr;

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
@@ -168,6 +164,20 @@ or1k_map_page(struct device *dev, struct page *page,
		break;
	}

}

/*
 * Map a page + offset for DMA.  On this platform the physical address
 * is used directly as the DMA address.  Cache maintenance is delegated
 * to or1k_sync_single_for_device() so that map_page and
 * sync_single_for_device stay equivalent, unless the caller requested
 * DMA_ATTR_SKIP_CPU_SYNC.
 *
 * NOTE(review): the rendered diff showed unused locals `cl` and
 * `cpuinfo` here; those are removed-diff lines whose '-' markers were
 * stripped, so they are dropped in this reconstruction.
 */
static dma_addr_t
or1k_map_page(struct device *dev, struct page *page,
	      unsigned long offset, size_t size,
	      enum dma_data_direction dir,
	      unsigned long attrs)
{
	dma_addr_t addr = page_to_phys(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		or1k_sync_single_for_device(dev, addr, size, dir);
	return addr;
}

@@ -187,20 +197,6 @@ or1k_map_sg(struct device *dev, struct scatterlist *sg,
	return nents;
}

/*
 * Make a CPU-written buffer visible to the device: walk the range
 * [dma_handle, dma_handle + size) one dcache block at a time and flush
 * each line by writing its address to SPR_DCBFR (data cache block
 * flush register).  @dev and @dir are accepted to match the dma_map_ops
 * prototype but are not consulted here — the same flush is done for
 * every direction.
 */
static void
or1k_sync_single_for_device(struct device *dev,
			    dma_addr_t dma_handle, size_t size,
			    enum dma_data_direction dir)
{
	unsigned long cl;	/* address of the cache line being flushed */
	dma_addr_t addr = dma_handle;
	/* Per-CPU info supplies this core's dcache block size. */
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	/* Flush the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);
}

const struct dma_map_ops or1k_dma_map_ops = {
	.alloc = or1k_dma_alloc,
	.free = or1k_dma_free,