
Commit 8c8a0ec5 authored by Russell King, committed by Russell King

[ARM] dma: use new dmabounce_sync_for_xxx() for dma_sync_single_xxx()

parent 2638b4db
File 1 of 2: +0 −24
@@ -444,30 +444,6 @@ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	unmap_single(dev, dma_addr, size, dir);
 }
 
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_addr,
-				   unsigned long offset, size_t size,
-				   enum dma_data_direction dir)
-{
-	dev_dbg(dev, "%s(dma=%#x,off=%#lx,size=%zx,dir=%x)\n",
-		__func__, dma_addr, offset, size, dir);
-
-	if (sync_single(dev, dma_addr, offset + size, dir))
-		dma_cache_maint(dma_to_virt(dev, dma_addr) + offset, size, dir);
-}
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_addr,
-				      unsigned long offset, size_t size,
-				      enum dma_data_direction dir)
-{
-	dev_dbg(dev, "%s(dma=%#x,off=%#lx,size=%zx,dir=%x)\n",
-		__func__, dma_addr, offset, size, dir);
-
-	if (sync_single(dev, dma_addr, offset + size, dir))
-		dma_cache_maint(dma_to_virt(dev, dma_addr) + offset, size, dir);
-}
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		unsigned long off, size_t sz, enum dma_data_direction dir)
 {
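For context, a minimal driver-side sketch of the streaming-DMA sync API whose dmabounce-backed implementations are removed above and re-routed through the inline helpers in the dma-mapping header below. The device, buffer, and function names are invented for illustration; only the dma_* calls are the real kernel API, and error handling is omitted.

#include <linux/dma-mapping.h>

/* Hypothetical receive completion path on an ARM dmabounce platform. */
static void example_rx_complete(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Map the buffer for the device; on dmabounce platforms this may
	 * transparently substitute a bounce buffer behind the handle. */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* ... device DMAs into the buffer ... */

	/* Hand ownership back to the CPU.  With this commit the inline
	 * implementation first calls dmabounce_sync_for_cpu(); only when
	 * that returns non-zero (no bounce buffer involved) does it fall
	 * through to dma_cache_maint() on a non-coherent CPU. */
	dma_sync_single_range_for_cpu(dev, handle, 0, len, DMA_FROM_DEVICE);

	/* ... CPU may now read buf ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}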
File 2 of 2: +76 −74
@@ -184,6 +184,76 @@ int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
 			  void *cpu_addr, dma_addr_t handle, size_t size);
 
 
+#ifdef CONFIG_DMABOUNCE
+/*
+ * For SA-1111, IXP425, and ADI systems  the dma-mapping functions are "magic"
+ * and utilize bounce buffers as needed to work around limited DMA windows.
+ *
+ * On the SA-1111, a bug limits DMA to only certain regions of RAM.
+ * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
+ * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
+ *
+ * The following are helper functions used by the dmabounce subystem
+ *
+ */
+
+/**
+ * dmabounce_register_dev
+ *
+ * @dev: valid struct device pointer
+ * @small_buf_size: size of buffers to use with small buffer pool
+ * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ *
+ * This function should be called by low-level platform code to register
+ * a device as requireing DMA buffer bouncing. The function will allocate
+ * appropriate DMA pools for the device.
+ *
+ */
+extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
+
+/**
+ * dmabounce_unregister_dev
+ *
+ * @dev: valid struct device pointer
+ *
+ * This function should be called by low-level platform code when device
+ * that was previously registered with dmabounce_register_dev is removed
+ * from the system.
+ *
+ */
+extern void dmabounce_unregister_dev(struct device *);
+
+/**
+ * dma_needs_bounce
+ *
+ * @dev: valid struct device pointer
+ * @dma_handle: dma_handle of unbounced buffer
+ * @size: size of region being mapped
+ *
+ * Platforms that utilize the dmabounce mechanism must implement
+ * this function.
+ *
+ * The dmabounce routines call this function whenever a dma-mapping
+ * is requested to determine whether a given buffer needs to be bounced
+ * or not. The function must return 0 if the buffer is OK for
+ * DMA access and 1 if the buffer needs to be bounced.
+ *
+ */
+extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
+
+/*
+ * Private functions
+ */
+int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
+			size_t, enum dma_data_direction);
+int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
+			size_t, enum dma_data_direction);
+#else
+#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
+#define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)
+#endif /* CONFIG_DMABOUNCE */
+
+
 /**
  * dma_map_single - map a single buffer for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -308,12 +378,14 @@ dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
  * must first the perform a dma_sync_for_device, and then the
  * device again owns the buffer.
  */
-#ifndef CONFIG_DMABOUNCE
 static inline void
 dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
 			      unsigned long offset, size_t size,
 			      enum dma_data_direction dir)
 {
+	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+		return;
+
 	if (!arch_is_coherent())
 		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
 }
@@ -323,13 +395,12 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
 				 unsigned long offset, size_t size,
 				 enum dma_data_direction dir)
 {
+	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
+		return;
+
 	if (!arch_is_coherent())
 		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
 }
-#else
-extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
-extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
-#endif
 
 static inline void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
@@ -354,74 +425,5 @@ extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum d
 extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
 
 
-#ifdef CONFIG_DMABOUNCE
-/*
- * For SA-1111, IXP425, and ADI systems  the dma-mapping functions are "magic"
- * and utilize bounce buffers as needed to work around limited DMA windows.
- *
- * On the SA-1111, a bug limits DMA to only certain regions of RAM.
- * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
- * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
- *
- * The following are helper functions used by the dmabounce subystem
- *
- */
-
-/**
- * dmabounce_register_dev
- *
- * @dev: valid struct device pointer
- * @small_buf_size: size of buffers to use with small buffer pool
- * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
- *
- * This function should be called by low-level platform code to register
- * a device as requireing DMA buffer bouncing. The function will allocate
- * appropriate DMA pools for the device.
- *
- */
-extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
-
-/**
- * dmabounce_unregister_dev
- *
- * @dev: valid struct device pointer
- *
- * This function should be called by low-level platform code when device
- * that was previously registered with dmabounce_register_dev is removed
- * from the system.
- *
- */
-extern void dmabounce_unregister_dev(struct device *);
-
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-
-/*
- * Private functions
- */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
-			size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
-			size_t, enum dma_data_direction);
-#else
-#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
-#define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)
-#endif /* CONFIG_DMABOUNCE */
-
 #endif /* __KERNEL__ */
 #endif
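A hedged sketch of the platform-side contract documented in the dmabounce block above, assuming a made-up platform whose devices can only reach the first 64MB of bus space. The example_* names, window size and pool sizes are invented; only dma_needs_bounce(), dmabounce_register_dev() and dmabounce_unregister_dev() are the declarations from this header.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical inbound DMA window (cf. the IXP425 note above). */
#define EXAMPLE_DMA_WINDOW_SIZE	(64 << 20)

/* Called by the dmabounce core for every mapping request: return 0 when
 * the buffer is reachable by the device as-is, 1 when it must be copied
 * into a bounce buffer.  A real platform would typically also check
 * which bus or device is involved. */
int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) > EXAMPLE_DMA_WINDOW_SIZE;
}

static int example_platform_dev_add(struct device *dev)
{
	/* Create a 2KB small-buffer pool and a 64KB large-buffer pool
	 * for this device (a size of 0 would disable the large pool). */
	return dmabounce_register_dev(dev, 2048, 65536);
}

static void example_platform_dev_remove(struct device *dev)
{
	/* Tear down the pools allocated by dmabounce_register_dev(). */
	dmabounce_unregister_dev(dev);
}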