
Commit 15237e1f authored by Marek Szyprowski

ARM: dma-mapping: move all dma bounce code to separate dma ops structure



This patch removes the dma bounce hooks from the common dma mapping
implementation on the ARM architecture and creates a separate set of
dma_map_ops for dma bounce devices.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
parent 2a550e73
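Some context for reading the diff: DMA mapping dispatch on ARM is per device at this point in the series. The two helpers below are paraphrased from the era's arch/arm/include/asm/dma-mapping.h and are not part of this commit; a device falls back to the global arm_dma_ops unless set_dma_ops() has installed something else, which is exactly the override dmabounce starts using here.

/*
 * Paraphrased context, not part of this diff: per-device ops dispatch
 * as it looked on ARM around this series.
 */
extern struct dma_map_ops arm_dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return &arm_dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	BUG_ON(!dev);
	dev->archdata.dma_ops = ops;
}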
arch/arm/common/dmabounce.c  +49 −13
@@ -308,8 +308,9 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir)
+static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
 {
 	dma_addr_t dma_addr;
 	int ret;
@@ -324,7 +325,7 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 		return DMA_ERROR_CODE;
 
 	if (ret == 0) {
-		__dma_page_cpu_to_dev(page, offset, size, dir);
+		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
 		return dma_addr;
 	}
 
@@ -335,7 +336,6 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 
 	return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(__dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -343,8 +343,8 @@ EXPORT_SYMBOL(__dma_map_page);
  * the safe buffer.  (basically return things back to the way they
  * should be)
  */
-void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
+static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct safe_buffer *buf;
 
@@ -353,16 +353,14 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
 	if (!buf) {
-		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
-			dma_addr & ~PAGE_MASK, size, dir);
+		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
 		return;
 	}
 
 	unmap_single(dev, buf, size, dir);
 }
-EXPORT_SYMBOL(__dma_unmap_page);
 
-int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
@@ -392,9 +390,17 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
-int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+static void dmabounce_sync_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
+		return;
+
+	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
+}
+
+static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
@@ -424,7 +430,35 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_device);
 
+static void dmabounce_sync_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
+		return;
+
+	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+}
+
+static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (dev->archdata.dmabounce)
+		return 0;
+
+	return arm_dma_ops.set_dma_mask(dev, dma_mask);
+}
+
+static struct dma_map_ops dmabounce_ops = {
+	.map_page		= dmabounce_map_page,
+	.unmap_page		= dmabounce_unmap_page,
+	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
+	.sync_single_for_device	= dmabounce_sync_for_device,
+	.map_sg			= arm_dma_map_sg,
+	.unmap_sg		= arm_dma_unmap_sg,
+	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
+	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
+	.set_dma_mask		= dmabounce_set_mask,
+};
+
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 		const char *name, unsigned long size)
@@ -486,6 +520,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 #endif
 
 	dev->archdata.dmabounce = device_info;
+	set_dma_ops(dev, &dmabounce_ops);
 
 	dev_info(dev, "dmabounce: registered device\n");
 
@@ -504,6 +539,7 @@ void dmabounce_unregister_dev(struct device *dev)
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 
 	dev->archdata.dmabounce = NULL;
+	set_dma_ops(dev, NULL);
 
 	if (!device_info) {
 		dev_warn(dev,
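Because set_dma_ops() is now called from dmabounce_register_dev() and cleared again in dmabounce_unregister_dev(), existing dmabounce users keep working unchanged. A minimal, hypothetical caller might look like the sketch below; the frob_* names and the 64 MiB window are invented for illustration, and it assumes the four-argument registration signature of this era:

/* Hypothetical bus driver fragment, for illustration only. */
static int frob_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) > SZ_64M;	/* bounce anything past the DMA window */
}

static int frob_probe(struct device *dev)
{
	/* 512-byte and PAGE_SIZE bounce pools; this now also installs
	 * dmabounce_ops for the device via set_dma_ops(). */
	return dmabounce_register_dev(dev, 512, PAGE_SIZE, frob_needs_bounce);
}

static void frob_remove(struct device *dev)
{
	dmabounce_unregister_dev(dev);	/* clears the per-device ops again */
}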
arch/arm/include/asm/dma-mapping.h  +2 −97
@@ -84,62 +84,6 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 }
 #endif
 
-/*
- * The DMA API is built upon the notion of "buffer ownership".  A buffer
- * is either exclusively owned by the CPU (and therefore may be accessed
- * by it) or exclusively owned by the DMA device.  These helper functions
- * represent the transitions between these two ownership states.
- *
- * Note, however, that on later ARMs, this notion does not work due to
- * speculative prefetches.  We model our approach on the assumption that
- * the CPU does do speculative prefetches, which means we clean caches
- * before transfers and delay cache invalidation until transfer completion.
- *
- * Private support functions: these are not part of the API and are
- * liable to change.  Drivers must not use these.
- */
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_cpu_to_dev(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_dev_to_cpu(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
-		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
-		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-extern int dma_supported(struct device *, u64);
-extern int dma_set_mask(struct device *, u64);
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
@@ -163,6 +107,8 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
 {
 }
 
+extern int dma_supported(struct device *dev, u64 mask);
+
 /**
  * dma_alloc_coherent - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -235,7 +181,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
 extern void __init init_consistent_dma_size(unsigned long size);
 
-
 #ifdef CONFIG_DMABOUNCE
 /*
  * For SA-1111, IXP425, and ADI systems  the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
@@ -275,47 +220,7 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
  */
 extern void dmabounce_unregister_dev(struct device *);
 
-/*
- * The DMA API, implemented by dmabounce.c.  See below for descriptions.
- */
-extern dma_addr_t __dma_map_page(struct device *, struct page *,
-		unsigned long, size_t, enum dma_data_direction);
-extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
-		enum dma_data_direction);
-
-/*
- * Private functions
- */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-#else
-static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
-	size_t size, enum dma_data_direction dir)
-{
-	return 1;
-}
-
-static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
-	size_t size, enum dma_data_direction dir)
-{
-	return 1;
-}
-
-
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(page, offset, size, dir);
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-		handle & ~PAGE_MASK, size, dir);
-}
 #endif /* CONFIG_DMABOUNCE */
 
 /*
  * The scatter list versions of the above methods.
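The compile-time CONFIG_DMABOUNCE stubs above can go because the decision is now made per device through the ops table. For mask setting in particular, the call path after this series looks roughly as follows; the inline wrapper is paraphrased from the era's header, not part of this diff:

/* Paraphrased context, not part of this diff. */
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return get_dma_ops(dev)->set_dma_mask(dev, mask);
}

/*
 * A dmabounce device reaches dmabounce_set_mask(), which accepts any
 * mask without touching dev->dma_mask (the bounce buffers hide the
 * narrow window).  Every other device reaches arm_dma_set_mask(),
 * which can now store the mask unconditionally; that is why the
 * #ifndef CONFIG_DMABOUNCE guard disappears in the last hunk below.
 */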
arch/arm/mm/dma-mapping.c  +69 −10
@@ -29,6 +29,75 @@
 
 #include "mm.h"
 
+/*
+ * The DMA API is built upon the notion of "buffer ownership".  A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device.  These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches.  We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ * Private support functions: these are not part of the API and are
+ * liable to change.  Drivers must not use these.
+ */
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_cpu_to_dev(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_cpu_to_dev(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_dev_to_cpu(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_dev_to_cpu(kaddr, size, dir);
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_cpu_to_dev(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_dev_to_cpu(page, off, size, dir);
+}
+
+
+static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+	__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+		handle & ~PAGE_MASK, size, dir);
+}
+
 /**
  * arm_dma_map_page - map a portion of a page for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -76,9 +145,6 @@ static inline void arm_dma_sync_single_for_cpu(struct device *dev,
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
-		return;
-
 	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
@@ -87,9 +153,6 @@ static inline void arm_dma_sync_single_for_device(struct device *dev,
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	if (!dmabounce_sync_for_device(dev, handle, size, dir))
-		return;
-
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
@@ -599,7 +662,6 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	}
 	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
-EXPORT_SYMBOL(___dma_page_cpu_to_dev);
 
 void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
@@ -619,7 +681,6 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
 		set_bit(PG_dcache_clean, &page->flags);
 }
-EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
 /**
  * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
@@ -737,9 +798,7 @@ static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
 	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
 		return -EIO;
 
-#ifndef CONFIG_DMABOUNCE
 	*dev->dma_mask = dma_mask;
-#endif
 
 	return 0;
 }
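The buffer-ownership comment now lives next to the code that implements it, in arch/arm/mm/dma-mapping.c. For reference, this is what those ownership transitions look like from a driver's point of view; a hypothetical sketch, with the frob_hw_* calls standing in for device-specific register accesses:

/* Hypothetical driver fragment illustrating the ownership transitions. */
#include <linux/dma-mapping.h>

extern void frob_hw_start_rx(dma_addr_t addr, size_t len);	/* made up */
extern void frob_hw_wait_rx(void);				/* made up */

static int frob_rx_one(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t handle;

	/* CPU -> device: caches are cleaned before the transfer starts */
	handle = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;		/* buffer still owned by the CPU */

	frob_hw_start_rx(handle, len);	/* device owns the buffer now */
	frob_hw_wait_rx();

	/* device -> CPU: invalidation is delayed until the transfer is done */
	dma_unmap_page(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}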