Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ec40f95d authored by Greg Ungerer
Browse files

m68knommu: fix DMA support for ColdFire



ColdFire CPU family members support DMA (all those with the FEC ethernet
core use it, the rest have dedicated DMA engines). The code support is
just missing a handful of routines for it to be usable by drivers.
Add the missing dma_ functions.

Signed-off-by: Greg Ungerer <gerg@uclinux.org>
parent 830c072b
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -16,6 +16,7 @@ config MMU


config NO_DMA
	bool
	depends on !COLDFIRE
	default y

config FPU
+34 −3
Original line number Original line Diff line number Diff line
@@ -7,10 +7,9 @@


#include <linux/types.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>


void *dma_alloc_coherent(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp)
@@ -36,7 +35,39 @@ void dma_free_coherent(struct device *dev, size_t size,
	free_pages((unsigned long)vaddr, get_order(size));
}


/*
 * Make a buffer at bus address @handle visible to the device before a
 * DMA transfer.  For DMA_TO_DEVICE the CPU's dirty dcache lines must
 * be written back to memory so the device reads current data; for
 * DMA_FROM_DEVICE nothing is needed here.  Any other direction is
 * reported (rate-limited) rather than silently ignored.
 *
 * NOTE(review): flush_dcache_range() is called here as (start, size);
 * confirm against <asm/cacheflush.h> that the second argument is a
 * length and not an end address.
 */
void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
				size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		flush_dcache_range(handle, size);
		break;
	case DMA_FROM_DEVICE:
		/* Should be clear already */
		break;
	default:
		if (printk_ratelimit())
			printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
		break;
	}
}
EXPORT_SYMBOL(dma_sync_single_for_device);
/*
 * Map a kernel virtual buffer for DMA and return its bus address.
 * ColdFire has no IOMMU, so the mapping is simply virt_to_phys()
 * plus whatever cache maintenance the transfer direction requires.
 *
 * Fix: the original flushed the dcache unconditionally; defer to the
 * direction-aware dma_sync_single_for_device() instead (consistent
 * with dma_map_page() below) so a DMA_FROM_DEVICE mapping does not
 * pay for a needless flush.
 */
dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
			  enum dma_data_direction dir)
{
	dma_addr_t handle = virt_to_phys(addr);

	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}
EXPORT_SYMBOL(dma_map_single);


/*
 * Map @size bytes starting at @offset within @page for DMA.
 * Returns the bus address of the mapped region after performing the
 * cache maintenance appropriate for @dir.
 */
dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir)
{
	/* Bus address of the data within the page. */
	dma_addr_t paddr = page_to_phys(page) + offset;

	dma_sync_single_for_device(dev, paddr, size, dir);
	return paddr;
}
EXPORT_SYMBOL(dma_map_page);