Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 160c1d8e authored by FUJITA Tomonori, committed by Ingo Molnar
Browse files

x86, ia64: convert to use generic dma_map_ops struct



This converts X86 and IA64 to use include/linux/dma-mapping.h.

It's a bit large but pretty boring. The major change for X86 is
converting 'int dir' to 'enum dma_data_direction dir' in DMA mapping
operations. The major changes for IA64 are using map_page and
unmap_page instead of map_single and unmap_single.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f0402a26
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -7,8 +7,8 @@

obj-y := setup.o
ifeq ($(CONFIG_DMAR), y)
obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o dig_vtd_iommu.o
obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o
else
obj-$(CONFIG_IA64_GENERIC) += machvec.o
endif
obj-$(CONFIG_IA64_DIG_VTD) += dig_vtd_iommu.o

arch/ia64/dig/dig_vtd_iommu.c

deleted 100644 → 0
+0 −77
Original line number Diff line number Diff line
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/intel-iommu.h>

/*
 * vtd_alloc_coherent - machvec hook for coherent DMA allocation.
 * Delegates directly to the Intel IOMMU (VT-d) implementation.
 */
void *vtd_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags)
{
	return intel_alloc_coherent(dev, size, dma_handle, flags);
}
EXPORT_SYMBOL_GPL(vtd_alloc_coherent);

/*
 * vtd_free_coherent - release a coherent buffer previously obtained
 * from vtd_alloc_coherent(); forwards to the VT-d layer.
 */
void vtd_free_coherent(struct device *dev, size_t size, void *vaddr,
		       dma_addr_t dma_handle)
{
	intel_free_coherent(dev, size, vaddr, dma_handle);
}
EXPORT_SYMBOL_GPL(vtd_free_coherent);

/*
 * vtd_map_single_attrs - map a single CPU buffer for DMA via VT-d.
 * The attrs argument exists only to satisfy the machvec signature
 * and is not forwarded; intel_map_single() takes the address cast
 * to phys_addr_t, exactly as the original machvec glue did.
 */
dma_addr_t vtd_map_single_attrs(struct device *dev, void *addr, size_t size,
				int dir, struct dma_attrs *attrs)
{
	return intel_map_single(dev, (phys_addr_t)addr, size, dir);
}
EXPORT_SYMBOL_GPL(vtd_map_single_attrs);

/*
 * vtd_unmap_single_attrs - tear down a single-buffer DMA mapping.
 * attrs is accepted for interface compatibility but ignored.
 */
void vtd_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
			    int dir, struct dma_attrs *attrs)
{
	intel_unmap_single(dev, iova, size, dir);
}
EXPORT_SYMBOL_GPL(vtd_unmap_single_attrs);

/*
 * vtd_map_sg_attrs - map a scatter/gather list for DMA through VT-d.
 * Forwards to intel_map_sg(); attrs is ignored.
 */
int vtd_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
		     int dir, struct dma_attrs *attrs)
{
	return intel_map_sg(dev, sglist, nents, dir);
}
EXPORT_SYMBOL_GPL(vtd_map_sg_attrs);

/*
 * vtd_unmap_sg_attrs - undo a scatter/gather DMA mapping.
 * Forwards to intel_unmap_sg(); attrs is ignored.
 */
void vtd_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
			int nents, int dir, struct dma_attrs *attrs)
{
	intel_unmap_sg(dev, sglist, nents, dir);
}
EXPORT_SYMBOL_GPL(vtd_unmap_sg_attrs);

/*
 * vtd_dma_mapping_error - mapping-error check for the VT-d path.
 * This backend never signals an error through the DMA address, so
 * every handle is unconditionally reported as valid.
 */
int vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
EXPORT_SYMBOL_GPL(vtd_dma_mapping_error);

extern int iommu_dma_supported(struct device *dev, u64 mask);

/*
 * DMA mapping operations backed by the Intel VT-d IOMMU.  The
 * allocation and map/unmap entries delegate to the vtd_* wrappers
 * defined above; the sync entries fall back to the generic machvec
 * helpers (presumably no-ops on cache-coherent IA64 — confirm
 * against machvec_dma_sync_single/_sg).
 */
struct dma_mapping_ops vtd_dma_ops = {
	.alloc_coherent		= vtd_alloc_coherent,
	.free_coherent		= vtd_free_coherent,
	.map_single_attrs	= vtd_map_single_attrs,
	.unmap_single_attrs	= vtd_unmap_single_attrs,
	.map_sg_attrs		= vtd_map_sg_attrs,
	.unmap_sg_attrs		= vtd_unmap_sg_attrs,
	.sync_single_for_cpu	= machvec_dma_sync_single,
	.sync_sg_for_cpu	= machvec_dma_sync_sg,
	.sync_single_for_device	= machvec_dma_sync_single,
	.sync_sg_for_device	= machvec_dma_sync_sg,
	.dma_supported_op	= iommu_dma_supported,
	.mapping_error		= vtd_dma_mapping_error,
};
+3 −3
Original line number Diff line number Diff line
@@ -17,7 +17,7 @@
#include <linux/swiotlb.h>
#include <asm/machvec.h>

extern struct dma_mapping_ops sba_dma_ops, swiotlb_dma_ops;
extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;

/* swiotlb declarations & definitions: */
extern int swiotlb_late_init_with_default_size (size_t size);
@@ -30,10 +30,10 @@ extern int swiotlb_late_init_with_default_size (size_t size);
static inline int use_swiotlb(struct device *dev)
{
	return dev && dev->dma_mask &&
		!sba_dma_ops.dma_supported_op(dev, *dev->dma_mask);
		!sba_dma_ops.dma_supported(dev, *dev->dma_mask);
}

struct dma_mapping_ops *hwsw_dma_get_ops(struct device *dev)
struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
{
	if (use_swiotlb(dev))
		return &swiotlb_dma_ops;
+32 −14
Original line number Diff line number Diff line
@@ -909,11 +909,13 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 *
 * See Documentation/DMA-mapping.txt
 */
static dma_addr_t
sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
static dma_addr_t sba_map_page(struct device *dev, struct page *page,
			       unsigned long poff, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	struct ioc *ioc;
	void *addr = page_address(page) + poff;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
@@ -992,6 +994,14 @@ sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
	return SBA_IOVA(ioc, iovp, offset);
}

static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
				       size_t size, enum dma_data_direction dir,
				       struct dma_attrs *attrs)
{
	return sba_map_page(dev, virt_to_page(addr),
			    (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
}

#ifdef ENABLE_MARK_CLEAN
static SBA_INLINE void
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
@@ -1026,8 +1036,8 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
 *
 * See Documentation/DMA-mapping.txt
 */
static void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
				   int dir, struct dma_attrs *attrs)
static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
			   enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
@@ -1095,6 +1105,12 @@ static void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t s
#endif /* DELAYED_RESOURCE_CNT == 0 */
}

void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
			    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	sba_unmap_page(dev, iova, size, dir, attrs);
}

/**
 * sba_alloc_coherent - allocate/map shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
@@ -1423,7 +1439,8 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 * See Documentation/DMA-mapping.txt
 */
static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
			    int nents, int dir, struct dma_attrs *attrs)
			    int nents, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
@@ -1514,7 +1531,8 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
 * See Documentation/DMA-mapping.txt
 */
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
			       int nents, int dir, struct dma_attrs *attrs)
			       int nents, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
#ifdef ASSERT_PDIR_SANITY
	struct ioc *ioc;
@@ -2062,7 +2080,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
	},
};

extern struct dma_mapping_ops swiotlb_dma_ops;
extern struct dma_map_ops swiotlb_dma_ops;

static int __init
sba_init(void)
@@ -2176,18 +2194,18 @@ sba_page_override(char *str)

__setup("sbapagesize=",sba_page_override);

struct dma_mapping_ops sba_dma_ops = {
struct dma_map_ops sba_dma_ops = {
	.alloc_coherent		= sba_alloc_coherent,
	.free_coherent		= sba_free_coherent,
	.map_single_attrs	= sba_map_single_attrs,
	.unmap_single_attrs	= sba_unmap_single_attrs,
	.map_sg_attrs		= sba_map_sg_attrs,
	.unmap_sg_attrs		= sba_unmap_sg_attrs,
	.map_page		= sba_map_page,
	.unmap_page		= sba_unmap_page,
	.map_sg			= sba_map_sg_attrs,
	.unmap_sg		= sba_unmap_sg_attrs,
	.sync_single_for_cpu	= machvec_dma_sync_single,
	.sync_sg_for_cpu	= machvec_dma_sync_sg,
	.sync_single_for_device	= machvec_dma_sync_single,
	.sync_sg_for_device	= machvec_dma_sync_sg,
	.dma_supported_op	= sba_dma_supported,
	.dma_supported		= sba_dma_supported,
	.mapping_error		= sba_dma_mapping_error,
};

+33 −74
Original line number Diff line number Diff line
@@ -9,73 +9,21 @@
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>

/*
 * IA64-private table of DMA primitives implemented by each backend
 * (SBA IOMMU, swiotlb, VT-d, ...).  This is the pre-conversion
 * structure that the commit replaces with the generic
 * struct dma_map_ops from include/linux/dma-mapping.h; note that
 * all direction parameters here are plain 'int' rather than
 * enum dma_data_direction.
 */
struct dma_mapping_ops {
	int             (*mapping_error)(struct device *dev,
					 dma_addr_t dma_addr);
	void*           (*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void            (*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t      (*map_single)(struct device *hwdev, unsigned long ptr,
				size_t size, int direction);
	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	/* _attrs variants additionally carry per-mapping DMA attributes. */
	dma_addr_t      (*map_single_attrs)(struct device *dev, void *cpu_addr,
					    size_t size, int direction,
					    struct dma_attrs *attrs);
	void		(*unmap_single_attrs)(struct device *dev,
					      dma_addr_t dma_addr,
					      size_t size, int direction,
					      struct dma_attrs *attrs);
	void            (*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void            (*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void            (*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int             (*map_sg_attrs)(struct device *dev,
					struct scatterlist *sg, int nents,
					int direction, struct dma_attrs *attrs);
	void            (*unmap_sg_attrs)(struct device *dev,
					  struct scatterlist *sg, int nents,
					  int direction,
					  struct dma_attrs *attrs);
	/* named _op to avoid clashing with the dma_supported() wrapper */
	int             (*dma_supported_op)(struct device *hwdev, u64 mask);
	int		is_phys;
};

extern struct dma_mapping_ops *dma_ops;
extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *daddr, gfp_t gfp)
{
	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *caddr, dma_addr_t daddr)
{
	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->free_coherent(dev, size, caddr, daddr);
}

@@ -87,8 +35,10 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
	return ops->map_single_attrs(dev, caddr, size, dir, attrs);
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, virt_to_page(caddr),
			     (unsigned long)caddr & ~PAGE_MASK, size,
			     dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
@@ -96,8 +46,8 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_single_attrs(dev, daddr, size, dir, attrs);
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_page(dev, daddr, size, dir, attrs);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
@@ -107,8 +57,8 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
	return ops->map_sg_attrs(dev, sgl, nents, dir, attrs);
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_sg(dev, sgl, nents, dir, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
@@ -116,8 +66,8 @@ static inline void dma_unmap_sg_attrs(struct device *dev,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_sg_attrs(dev, sgl, nents, dir, attrs);
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_sg(dev, sgl, nents, dir, attrs);
}

#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
@@ -127,7 +77,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_cpu(dev, daddr, size, dir);
}

@@ -135,7 +85,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sgl,
				       int nents, enum dma_data_direction dir)
{
	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
}

@@ -144,7 +94,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
					      size_t size,
					      enum dma_data_direction dir)
{
	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_device(dev, daddr, size, dir);
}

@@ -153,20 +103,29 @@ static inline void dma_sync_sg_for_device(struct device *dev,
					  int nents,
					  enum dma_data_direction dir)
{
	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_device(dev, sgl, nents, dir);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->mapping_error(dev, daddr);
}

#define dma_map_page(dev, pg, off, size, dir)				\
	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir)			\
	dma_unmap_single(dev, dma_addr, size, dir)
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, page, offset, size, dir, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, addr, size, dir);
}

/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
@@ -180,8 +139,8 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
	return ops->dma_supported_op(dev, mask);
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->dma_supported(dev, mask);
}

static inline int
Loading