
Commit dd3b0e3e authored by Barry Song, committed by Mike Frysinger

Blackfin: dma-mapping.h: flesh out missing DMA mapping functions

parent a00b4fe5
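
For orientation before the diff: the commit turns the Blackfin streaming-DMA stubs into real inline implementations. A minimal sketch of how a driver exercises these entry points follows; the function example_start_tx and its buffer handling are hypothetical, only the dma_* calls come from the header below.

#include <linux/dma-mapping.h>

/* Map a CPU-filled buffer for a device read, run the transfer, unmap.
 * On Blackfin (no MMU, no IOMMU) dma_map_single() boils down to a
 * cache writeback plus a cast of the kernel virtual address. */
static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;

	/* ... program the DMA engine with 'handle' and start it ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}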
arch/blackfin/include/asm/dma-mapping.h  +98 −23
@@ -7,9 +7,9 @@
 #ifndef _BLACKFIN_DMA_MAPPING_H
 #define _BLACKFIN_DMA_MAPPING_H
 
-#include <asm/scatterlist.h>
+#include <asm/cacheflush.h>
+struct scatterlist;
 
-void dma_alloc_init(unsigned long start, unsigned long end);
 void *dma_alloc_coherent(struct device *dev, size_t size,
 			 dma_addr_t *dma_handle, gfp_t gfp);
 void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
@@ -20,13 +20,51 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
  */
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_supported(d, m)         (1)
+#define dma_get_cache_alignment()   (32)
+#define dma_is_consistent(d, h)     (1)
 
-static inline
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static inline int
+dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+	*dev->dma_mask = dma_mask;
+
+	return 0;
+}
+
+static inline int
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
 }
 
+extern void
+__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);
+static inline void
+_dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	if (!__builtin_constant_p(dir)) {
+		__dma_sync(addr, size, dir);
+		return;
+	}
+
+	switch (dir) {
+	case DMA_NONE:
+		BUG();
+	case DMA_TO_DEVICE:		/* writeback only */
+		flush_dcache_range(addr, addr + size);
+		break;
+	case DMA_FROM_DEVICE: /* invalidate only */
+	case DMA_BIDIRECTIONAL: /* flush and invalidate */
+		/* Blackfin has no dedicated invalidate (it includes a flush) */
+		invalidate_dcache_range(addr, addr + size);
+		break;
+	}
+}
+
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode.
  * The 32-bit bus address to use is returned.
@@ -34,8 +72,13 @@ int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  * Once the device is given the dma address, the device owns this memory
  * until either pci_unmap_single or pci_dma_sync_single is performed.
  */
-extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-				 enum dma_data_direction direction);
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+	       enum dma_data_direction dir)
+{
+	_dma_sync((dma_addr_t)ptr, size, dir);
+	return (dma_addr_t) ptr;
+}
 
 static inline dma_addr_t
 dma_map_page(struct device *dev, struct page *page,
@@ -53,8 +96,12 @@ dma_map_page(struct device *dev, struct page *page,
  * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-			  enum dma_data_direction direction);
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		 enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+}
 
 static inline void
 dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
@@ -80,38 +127,66 @@ dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
  * the same here.
  */
 extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		      enum dma_data_direction direction);
+		      enum dma_data_direction dir);
 
 /*
  * Unmap a set of streaming mode DMA translations.
  * Again, cpu read rules concerning calls here are the same as for
  * pci_unmap_single() above.
  */
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-		      int nhwentries, enum dma_data_direction direction);
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+	     int nhwentries, enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+}
 
-static inline void dma_sync_single_for_cpu(struct device *dev,
-					dma_addr_t handle, size_t size,
-					enum dma_data_direction dir)
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
 }
 
-static inline void dma_sync_single_for_device(struct device *dev,
-					dma_addr_t handle, size_t size,
-					enum dma_data_direction dir)
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir)
 {
+	_dma_sync(handle + offset, size, dir);
 }
 
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-					struct scatterlist *sg,
-					int nents, enum dma_data_direction dir)
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
+			enum dma_data_direction dir)
 {
+	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
 }
 
-static inline void dma_sync_sg_for_device(struct device *dev,
-					struct scatterlist *sg,
-					int nents, enum dma_data_direction dir)
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
+			   enum dma_data_direction dir)
 {
+	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
 }
 
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
+		    enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+}
+
+extern void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		       int nents, enum dma_data_direction dir);
+
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	       enum dma_data_direction dir)
+{
+	_dma_sync((dma_addr_t)vaddr, size, dir);
+}
+
 #endif				/* _BLACKFIN_DMA_MAPPING_H */
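
A note on the _dma_sync() helper introduced above: because it is inline and tests __builtin_constant_p(dir), any call site that passes a compile-time-constant direction folds to exactly one cache routine, while a run-time direction falls back to the out-of-line __dma_sync(). A sketch of both cases (example() is an illustrative caller, not part of the commit):

static void example(dma_addr_t addr, size_t len, enum dma_data_direction dir)
{
	/* dir is constant: reduces to flush_dcache_range(addr, addr + len) */
	_dma_sync(addr, len, DMA_TO_DEVICE);

	/* dir is only known at run time: emits a call to __dma_sync() */
	_dma_sync(addr, len, dir);
}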
arch/blackfin/include/asm/dma.h  +1 −4
@@ -13,8 +13,7 @@
 #include <asm/atomic.h>
 #include <asm/blackfin.h>
 #include <asm/page.h>
-
-#define MAX_DMA_ADDRESS PAGE_OFFSET
+#include <asm-generic/dma.h>
 
 /* DMA_CONFIG Masks */
 #define DMAEN			0x0001	/* DMA Channel Enable */
@@ -257,8 +256,6 @@ static inline void enable_dma(unsigned int channel)
 	dma_ch[channel].regs->curr_y_count = 0;
 	dma_ch[channel].regs->cfg |= DMAEN;
 }
-void free_dma(unsigned int channel);
-int request_dma(unsigned int channel, const char *device_id);
 int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data);
 
 static inline void dma_disable_irq(unsigned int channel)
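
The declarations deleted above are not lost; <asm-generic/dma.h> provides equivalents for architectures without ISA-style DMA quirks. Roughly, it supplies the pieces this header used to define itself (paraphrased, not a verbatim copy of the generic header):

#define MAX_DMA_ADDRESS	PAGE_OFFSET

extern int request_dma(unsigned int dmanr, const char *device_id);
extern void free_dma(unsigned int dmanr);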
arch/blackfin/kernel/dma-mapping.c  +17 −37
@@ -7,17 +7,11 @@
  */
 
 #include <linux/types.h>
-#include <linux/mm.h>
+#include <linux/gfp.h>
 #include <linux/string.h>
-#include <linux/bootmem.h>
 #include <linux/spinlock.h>
-#include <linux/device.h>
 #include <linux/dma-mapping.h>
-#include <linux/io.h>
 #include <linux/scatterlist.h>
-#include <asm/cacheflush.h>
-#include <asm/bfin-global.h>
-#include <asm/sections.h>
 
 static spinlock_t dma_page_lock;
 static unsigned long *dma_page;
@@ -26,7 +20,7 @@ static unsigned long dma_base;
 static unsigned long dma_size;
 static unsigned int dma_initialized;
 
-void dma_alloc_init(unsigned long start, unsigned long end)
+static void dma_alloc_init(unsigned long start, unsigned long end)
 {
 	spin_lock_init(&dma_page_lock);
 	dma_initialized = 0;
@@ -117,21 +111,14 @@ dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 EXPORT_SYMBOL(dma_free_coherent);
 
 /*
- * Dummy functions defined for some existing drivers
+ * Streaming DMA mappings
  */
-
-dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction)
+void __dma_sync(dma_addr_t addr, size_t size,
+		enum dma_data_direction dir)
 {
-	BUG_ON(direction == DMA_NONE);
-
-	invalidate_dcache_range((unsigned long)ptr,
-			(unsigned long)ptr + size);
-
-	return (dma_addr_t) ptr;
+	_dma_sync(addr, size, dir);
 }
-EXPORT_SYMBOL(dma_map_single);
+EXPORT_SYMBOL(__dma_sync);
 
 int
 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
@@ -139,30 +126,23 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 {
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
-
 	for (i = 0; i < nents; i++, sg++) {
 		sg->dma_address = (dma_addr_t) sg_virt(sg);
-
-		invalidate_dcache_range(sg_dma_address(sg),
-					sg_dma_address(sg) +
-					sg_dma_len(sg));
+		__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
 	}
 
 	return nents;
 }
 EXPORT_SYMBOL(dma_map_sg);
 
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction direction)
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+			    int nelems, enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-}
-EXPORT_SYMBOL(dma_unmap_single);
-
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nhwentries, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
+	int i;
+
+	for (i = 0; i < nelems; i++, sg++) {
+		sg->dma_address = (dma_addr_t) sg_virt(sg);
+		__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
+	}
 }
-EXPORT_SYMBOL(dma_unmap_sg);
+EXPORT_SYMBOL(dma_sync_sg_for_device);
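
Taken together with the header changes, the sync hooks implement the usual streaming-DMA ownership handoff. A sketch of reusing a mapped receive buffer across DMA rounds without remapping it (names and flow are illustrative, not from this commit):

static void example_reuse_rx(struct device *dev, dma_addr_t handle, size_t len)
{
	/* give the buffer to the CPU; on Blackfin this direction is a
	 * no-op, since the cache only needs attention before the device
	 * touches memory */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... CPU parses the received data at the mapped address ... */

	/* hand it back to the device; _dma_sync() invalidates the cached
	 * range so stale lines cannot shadow the next DMA write */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	/* ... re-arm the DMA engine with 'handle' ... */
}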