Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d7dff840 authored by FUJITA Tomonori, committed by Ingo Molnar
Browse files

x86: remove map_single and unmap_single in struct dma_mapping_ops



This patch converts dma_map_single and dma_unmap_single to use
map_page and unmap_page respectively and removes unnecessary
map_single and unmap_single in struct dma_mapping_ops.

This leaves intel-iommu's dma_map_single and dma_unmap_single since
IA64 uses them. They will be removed after the unification.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 33feffd4
Loading
Loading
Loading
Loading
+6 −9
Original line number Diff line number Diff line
@@ -24,10 +24,6 @@ struct dma_mapping_ops {
				dma_addr_t *dma_handle, gfp_t gfp);
	void            (*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void            (*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
@@ -103,7 +99,9 @@ dma_map_single(struct device *hwdev, void *ptr, size_t size,
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
	return ops->map_page(hwdev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     direction, NULL);
}

static inline void
@@ -113,8 +111,8 @@ dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_single)
		ops->unmap_single(dev, addr, size, direction);
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, direction, NULL);
}

static inline int
@@ -221,8 +219,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(dev, page_to_phys(page) + offset,
			       size, direction);
	return ops->map_page(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+0 −15
Original line number Diff line number Diff line
@@ -1341,13 +1341,6 @@ out:
	return addr;
}

static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
			     size_t size, int dir)
{
	return map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
			paddr & ~PAGE_MASK, size, dir, NULL);
}

/*
 * The exported unmap_single function for dma_ops.
 */
@@ -1378,12 +1371,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
	spin_unlock_irqrestore(&domain->lock, flags);
}

static void unmap_single(struct device *dev, dma_addr_t dma_addr,
			 size_t size, int dir)
{
	return unmap_page(dev, dma_addr, size, dir, NULL);
}

/*
 * This is a special map_sg function which is used if we should map a
 * device which is not handled by an AMD IOMMU in the system.
@@ -1664,8 +1651,6 @@ static void prealloc_protection_domains(void)
static struct dma_mapping_ops amd_iommu_dma_ops = {
	.alloc_coherent = alloc_coherent,
	.free_coherent = free_coherent,
	.map_single = map_single,
	.unmap_single = unmap_single,
	.map_page = map_page,
	.unmap_page = unmap_page,
	.map_sg = map_sg,
+0 −16
Original line number Diff line number Diff line
@@ -461,14 +461,6 @@ static dma_addr_t calgary_map_page(struct device *dev, struct page *page,
	return iommu_alloc(dev, tbl, vaddr, npages, dir);
}

static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
				     size_t size, int direction)
{
	return calgary_map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
				paddr & ~PAGE_MASK, size,
				direction, NULL);
}

static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
@@ -480,12 +472,6 @@ static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
	iommu_free(tbl, dma_addr, npages);
}

static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
				 size_t size, int direction)
{
	calgary_unmap_page(dev, dma_handle, size, direction, NULL);
}

static void* calgary_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t flag)
{
@@ -535,8 +521,6 @@ static void calgary_free_coherent(struct device *dev, size_t size,
static struct dma_mapping_ops calgary_dma_ops = {
	.alloc_coherent = calgary_alloc_coherent,
	.free_coherent = calgary_free_coherent,
	.map_single = calgary_map_single,
	.unmap_single = calgary_unmap_single,
	.map_sg = calgary_map_sg,
	.unmap_sg = calgary_unmap_sg,
	.map_page = calgary_map_page,
+2 −17
Original line number Diff line number Diff line
@@ -275,13 +275,6 @@ static dma_addr_t gart_map_page(struct device *dev, struct page *page,
	return bus;
}

static dma_addr_t gart_map_single(struct device *dev, phys_addr_t paddr,
				  size_t size, int dir)
{
	return gart_map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
			     paddr & ~PAGE_MASK, size, dir, NULL);
}

/*
 * Free a DMA mapping.
 */
@@ -306,12 +299,6 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
	free_iommu(iommu_page, npages);
}

static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	gart_unmap_page(dev, dma_addr, size, direction, NULL);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
@@ -324,7 +311,7 @@ gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
	}
}

@@ -538,7 +525,7 @@ static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr)
{
	gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, get_order(size));
}

@@ -725,8 +712,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
}

static struct dma_mapping_ops gart_dma_ops = {
	.map_single			= gart_map_single,
	.unmap_single			= gart_unmap_single,
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
+0 −8
Original line number Diff line number Diff line
@@ -38,13 +38,6 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
	return bus;
}

static dma_addr_t nommu_map_single(struct device *hwdev, phys_addr_t paddr,
				   size_t size, int direction)
{
	return nommu_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
			      paddr & ~PAGE_MASK, size, direction, NULL);
}

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
@@ -88,7 +81,6 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
struct dma_mapping_ops nommu_dma_ops = {
	.alloc_coherent = dma_generic_alloc_coherent,
	.free_coherent = nommu_free_coherent,
	.map_single = nommu_map_single,
	.map_sg = nommu_map_sg,
	.map_page = nommu_map_page,
	.is_phys = 1,
Loading