Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5411ad27 authored by Christoph Hellwig, committed by Michal Simek
Browse files

microblaze: use generic dma_noncoherent_ops



Switch to the generic noncoherent direct mapping implementation.

This removes the direction-based optimizations in
sync_{single,sg}_for_{cpu,device} which were marked untested and
do not match the usually very well tested {un,}map_{single,sg}
implementations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
parent bd05a58d
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
config MICROBLAZE
	def_bool y
	select ARCH_HAS_GCOV_PROFILE_ALL
	select ARCH_HAS_SYNC_DMA_FOR_CPU
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
	select ARCH_MIGHT_HAVE_PC_PARPORT
	select ARCH_NO_COHERENT_DMA_MMAP if !MMU
	select ARCH_WANT_IPC_PARSE_VERSION
@@ -8,6 +10,8 @@ config MICROBLAZE
	select TIMER_OF
	select CLONE_BACKWARDS3
	select COMMON_CLK
	select DMA_NONCOHERENT_OPS
	select DMA_NONCOHERENT_MMAP
	select GENERIC_ATOMIC64
	select GENERIC_CLOCKEVENTS
	select GENERIC_CPU_DEVICES
+1 −0
Original line number Diff line number Diff line
@@ -5,6 +5,7 @@ generic-y += bugs.h
generic-y += compat.h
generic-y += device.h
generic-y += div64.h
generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
+0 −28
Original line number Diff line number Diff line
/*
 * Implements the generic device DMA API for microblaze and PCI.
 *
 * Copyright (C) 2009-2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2009-2010 PetaLogix
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * This file is based on the powerpc and x86 dma-mapping.h versions
 * Copyright (C) 2004 IBM
 */

#ifndef _ASM_MICROBLAZE_DMA_MAPPING_H
#define _ASM_MICROBLAZE_DMA_MAPPING_H

/*
 * Available generic sets of operations
 */
extern const struct dma_map_ops dma_nommu_ops;

/* Every bus on microblaze uses the same direct-mapped, noncoherent ops. */
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	return &dma_nommu_ops;
}

#endif	/* _ASM_MICROBLAZE_DMA_MAPPING_H */
+0 −2
Original line number Diff line number Diff line
@@ -553,8 +553,6 @@ void __init *early_get_page(void);

extern unsigned long ioremap_bot, ioremap_base;

void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle);
void consistent_free(size_t size, void *vaddr);
void consistent_sync(void *vaddr, size_t size, int direction);
void consistent_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction);
+12 −132
Original line number Diff line number Diff line
@@ -8,29 +8,15 @@
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <asm/cacheflush.h>

/*
 * Allocate a coherent (uncached) buffer for DMA; consistent_alloc()
 * fills in *dma_handle with the device-visible address.  @dev and
 * @attrs are unused on this platform.
 */
static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       unsigned long attrs)
{
	return consistent_alloc(flag, size, dma_handle);
}

/*
 * Free a buffer obtained from dma_nommu_alloc_coherent().  Only the
 * kernel virtual address and size are needed; @dev, @dma_handle and
 * @attrs are unused on this platform.
 */
static void dma_nommu_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     unsigned long attrs)
{
	consistent_free(size, vaddr);
}

static inline void __dma_sync(unsigned long paddr,
			      size_t size, enum dma_data_direction direction)
static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
@@ -45,111 +31,19 @@ static inline void __dma_sync(unsigned long paddr,
	}
}

/*
 * Map a scatterlist for DMA: memory is identity-mapped, so each entry's
 * dma_address is just its physical address.  Cache maintenance is done
 * per entry unless the caller asked to skip it.
 *
 * FIXME: this path is untested.
 */
static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sgl, s, nents, i) {
		s->dma_address = sg_phys(s);
		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			__dma_sync(sg_phys(s), s->length, direction);
	}

	return nents;
}

static inline dma_addr_t dma_nommu_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     unsigned long attrs)
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset;
	__dma_sync(dev, paddr, size, dir);
}

static inline void dma_nommu_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
/* There is not necessary to do cache cleanup
 *
 * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and
 * dma_address is physical address
 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(dma_address, size, direction);
	__dma_sync(dev, paddr, size, dir);
}

static inline void
dma_nommu_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * Handing the buffer back to the CPU only requires cache work
	 * when the device may have written to it; flushing is pointless
	 * for the other directions.
	 */
	if (direction != DMA_FROM_DEVICE)
		return;

	__dma_sync(dma_handle, size, direction);
}

static inline void
dma_nommu_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * Cache maintenance is only needed when the CPU has written data
	 * the device is about to read; invalidating for other directions
	 * would be pointless.
	 */
	if (direction != DMA_TO_DEVICE)
		return;

	__dma_sync(dma_handle, size, direction);
}

static inline void
dma_nommu_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *s;
	int i;

	/* FIXME: untested.  Only device writes need CPU-side cache work. */
	if (direction != DMA_FROM_DEVICE)
		return;

	for_each_sg(sgl, s, nents, i)
		__dma_sync(s->dma_address, s->length, direction);
}

static inline void
dma_nommu_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *s;
	int i;

	/* FIXME: untested.  Only CPU writes need syncing toward the device. */
	if (direction != DMA_TO_DEVICE)
		return;

	for_each_sg(sgl, s, nents, i)
		__dma_sync(s->dma_address, s->length, direction);
}

static
int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t handle, size_t size,
		unsigned long attrs)
{
@@ -170,17 +64,3 @@ int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
	return -ENXIO;
#endif
}

/*
 * DMA operations for microblaze: addresses are direct-mapped (the
 * dma_address of a page is page_to_phys()), with explicit cache
 * maintenance performed via __dma_sync().  Exported so bus code can
 * reference it through get_arch_dma_ops().
 */
const struct dma_map_ops dma_nommu_ops = {
	.alloc			= dma_nommu_alloc_coherent,
	.free			= dma_nommu_free_coherent,
	.mmap			= dma_nommu_mmap_coherent,
	.map_sg			= dma_nommu_map_sg,
	.map_page		= dma_nommu_map_page,
	.unmap_page		= dma_nommu_unmap_page,
	.sync_single_for_cpu	= dma_nommu_sync_single_for_cpu,
	.sync_single_for_device	= dma_nommu_sync_single_for_device,
	.sync_sg_for_cpu	= dma_nommu_sync_sg_for_cpu,
	.sync_sg_for_device	= dma_nommu_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_nommu_ops);
Loading