
Commit 309df0c5 authored by Arthur Kepner, committed by Linus Torvalds

dma/ia64: update ia64 machvecs, swiotlb.c



Change all ia64 machvecs to use the new dma_*map*_attrs() interfaces.
Implement the old dma_*map_*() interfaces in terms of the corresponding new
interfaces.  For ia64/sn, make use of one dma attribute,
DMA_ATTR_WRITE_BARRIER.  Introduce swiotlb_*map*_attrs() functions.
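
[Editor's note: for context, a caller opts into an attribute through the
dma_attrs API this series builds on (include/linux/dma-attrs.h). A minimal
sketch, assuming a driver-supplied device and status buffer; the function
and variable names below are illustrative, not part of this commit:

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

/*
 * Map a completion-status buffer so that the device's DMA write to it
 * flushes any still-pending payload DMA ahead of it. Only ia64/sn
 * honors DMA_ATTR_WRITE_BARRIER; other platforms ignore the attribute.
 */
static dma_addr_t map_status_buf(struct device *dev, void *buf, size_t len)
{
	struct dma_attrs attrs;

	init_dma_attrs(&attrs);		/* clear all attributes */
	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
	return dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE, &attrs);
}

Passing NULL for the attrs argument means "no attributes", which is how the
old dma_map_single()/dma_map_sg() wrappers in this commit are implemented.]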

Signed-off-by: Arthur Kepner <akepner@sgi.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Roland Dreier <rdreier@cisco.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Grant Grundler <grundler@parisc-linux.org>
Cc: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a75b0a2f
+32 −29
@@ -20,10 +20,10 @@
 extern int swiotlb_late_init_with_default_size (size_t size);
 extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
 extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
-extern ia64_mv_dma_map_single		swiotlb_map_single;
-extern ia64_mv_dma_unmap_single		swiotlb_unmap_single;
-extern ia64_mv_dma_map_sg		swiotlb_map_sg;
-extern ia64_mv_dma_unmap_sg		swiotlb_unmap_sg;
+extern ia64_mv_dma_map_single_attrs	swiotlb_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs	swiotlb_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs		swiotlb_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs;
 extern ia64_mv_dma_supported		swiotlb_dma_supported;
 extern ia64_mv_dma_mapping_error	swiotlb_dma_mapping_error;
 
@@ -31,19 +31,19 @@ extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error;
 
 extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
 extern ia64_mv_dma_free_coherent	sba_free_coherent;
-extern ia64_mv_dma_map_single		sba_map_single;
-extern ia64_mv_dma_unmap_single		sba_unmap_single;
-extern ia64_mv_dma_map_sg		sba_map_sg;
-extern ia64_mv_dma_unmap_sg		sba_unmap_sg;
+extern ia64_mv_dma_map_single_attrs	sba_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs	sba_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs		sba_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs	sba_unmap_sg_attrs;
 extern ia64_mv_dma_supported		sba_dma_supported;
 extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
 
 #define hwiommu_alloc_coherent		sba_alloc_coherent
 #define hwiommu_free_coherent		sba_free_coherent
-#define hwiommu_map_single		sba_map_single
-#define hwiommu_unmap_single		sba_unmap_single
-#define hwiommu_map_sg			sba_map_sg
-#define hwiommu_unmap_sg		sba_unmap_sg
+#define hwiommu_map_single_attrs	sba_map_single_attrs
+#define hwiommu_unmap_single_attrs	sba_unmap_single_attrs
+#define hwiommu_map_sg_attrs		sba_map_sg_attrs
+#define hwiommu_unmap_sg_attrs		sba_unmap_sg_attrs
 #define hwiommu_dma_supported		sba_dma_supported
 #define hwiommu_dma_mapping_error	sba_dma_mapping_error
 #define hwiommu_sync_single_for_cpu	machvec_dma_sync_single
@@ -98,41 +98,48 @@ hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma
 }
 
 dma_addr_t
-hwsw_map_single (struct device *dev, void *addr, size_t size, int dir)
+hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
+		       struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_map_single(dev, addr, size, dir);
+		return swiotlb_map_single_attrs(dev, addr, size, dir, attrs);
 	else
-		return hwiommu_map_single(dev, addr, size, dir);
+		return hwiommu_map_single_attrs(dev, addr, size, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_map_single_attrs);
 
 void
-hwsw_unmap_single (struct device *dev, dma_addr_t iova, size_t size, int dir)
+hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+			 int dir, struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_unmap_single(dev, iova, size, dir);
+		return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs);
 	else
-		return hwiommu_unmap_single(dev, iova, size, dir);
+		return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs);
 }
-
+EXPORT_SYMBOL(hwsw_unmap_single_attrs);
 
 int
-hwsw_map_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+		   int dir, struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_map_sg(dev, sglist, nents, dir);
+		return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs);
 	else
-		return hwiommu_map_sg(dev, sglist, nents, dir);
+		return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_map_sg_attrs);
 
 void
-hwsw_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+		     int dir, struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_unmap_sg(dev, sglist, nents, dir);
+		return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
 	else
-		return hwiommu_unmap_sg(dev, sglist, nents, dir);
+		return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_unmap_sg_attrs);
 
 void
 hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir)
@@ -185,10 +192,6 @@ hwsw_dma_mapping_error (dma_addr_t dma_addr)
 }
 
 EXPORT_SYMBOL(hwsw_dma_mapping_error);
-EXPORT_SYMBOL(hwsw_map_single);
-EXPORT_SYMBOL(hwsw_unmap_single);
-EXPORT_SYMBOL(hwsw_map_sg);
-EXPORT_SYMBOL(hwsw_unmap_sg);
 EXPORT_SYMBOL(hwsw_dma_supported);
 EXPORT_SYMBOL(hwsw_alloc_coherent);
 EXPORT_SYMBOL(hwsw_free_coherent);
+37 −27
@@ -899,16 +899,18 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 }
 
 /**
- * sba_map_single - map one buffer and return IOVA for DMA
+ * sba_map_single_attrs - map one buffer and return IOVA for DMA
  * @dev: instance of PCI owned by the driver that's asking.
  * @addr:  driver buffer to map.
  * @size:  number of bytes to map in driver buffer.
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
 dma_addr_t
-sba_map_single(struct device *dev, void *addr, size_t size, int dir)
+sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
+		     struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	dma_addr_t iovp;
@@ -932,7 +934,8 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 		** Device is bit capable of DMA'ing to the buffer...
 		** just return the PCI address of ptr
 		*/
-		DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
+		DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
+			   "0x%lx/0x%lx\n",
 		           to_pci_dev(dev)->dma_mask, pci_addr);
 		return pci_addr;
 	}
@@ -953,7 +956,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	if (sba_check_pdir(ioc,"Check before sba_map_single()"))
+	if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
 		panic("Sanity check failed");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -982,11 +985,12 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 	/* form complete address */
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	sba_check_pdir(ioc,"Check after sba_map_single()");
+	sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 	return SBA_IOVA(ioc, iovp, offset);
 }
+EXPORT_SYMBOL(sba_map_single_attrs);
 
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
@@ -1013,15 +1017,17 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
 #endif
 
 /**
- * sba_unmap_single - unmap one IOVA and free resources
+ * sba_unmap_single_attrs - unmap one IOVA and free resources
  * @dev: instance of PCI owned by the driver that's asking.
  * @iova:  IOVA of driver buffer previously mapped.
  * @size:  number of bytes mapped in driver buffer.
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
 * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
+void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+			    int dir, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1038,7 +1044,8 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 		/*
 		** Address does not fall w/in IOVA, must be bypassing
 		*/
-		DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova);
+		DBG_BYPASS("sba_unmap_single_atttrs() bypass addr: 0x%lx\n",
+			   iova);
 
 #ifdef ENABLE_MARK_CLEAN
 		if (dir == DMA_FROM_DEVICE) {
@@ -1087,7 +1094,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
-
+EXPORT_SYMBOL(sba_unmap_single_attrs);
 
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
@@ -1144,7 +1151,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
 	 * If device can't bypass or bypass is disabled, pass the 32bit fake
 	 * device to map single to get an iova mapping.
 	 */
-	*dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);
+	*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
+					   size, 0, NULL);
 
 	return addr;
 }
@@ -1161,7 +1169,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
  */
 void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
 {
-	sba_unmap_single(dev, dma_handle, size, 0);
+	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -1410,10 +1418,12 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
  * @sglist:  array of buffer/length pairs
  * @nents:  number of entries in list
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
 * See Documentation/DMA-mapping.txt
  */
-int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
+int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+		     int dir, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	int coalesced, filled = 0;
@@ -1441,16 +1451,16 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
 		sglist->dma_length = sglist->length;
-		sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir);
+		sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
 		return 1;
 	}
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
+	if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
 	{
 		sba_dump_sg(ioc, sglist, nents);
-		panic("Check before sba_map_sg()");
+		panic("Check before sba_map_sg_attrs()");
 	}
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -1479,10 +1489,10 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
+	if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
 	{
 		sba_dump_sg(ioc, sglist, nents);
-		panic("Check after sba_map_sg()\n");
+		panic("Check after sba_map_sg_attrs()\n");
 	}
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -1492,18 +1502,20 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 
 	return filled;
 }
+EXPORT_SYMBOL(sba_map_sg_attrs);
 
 /**
- * sba_unmap_sg - unmap Scatter/Gather list
+ * sba_unmap_sg_attrs - unmap Scatter/Gather list
  * @dev: instance of PCI owned by the driver that's asking.
  * @sglist:  array of buffer/length pairs
  * @nents:  number of entries in list
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
 * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			int nents, int dir, struct dma_attrs *attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
 	struct ioc *ioc;
@@ -1518,13 +1530,14 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 	ASSERT(ioc);
 
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	sba_check_pdir(ioc,"Check before sba_unmap_sg()");
+	sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
 	while (nents && sglist->dma_length) {
 
-		sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
+		sba_unmap_single_attrs(dev, sglist->dma_address,
+				       sglist->dma_length, dir, attrs);
 		sglist = sg_next(sglist);
 		nents--;
 	}
@@ -1533,11 +1546,12 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	sba_check_pdir(ioc,"Check after sba_unmap_sg()");
+	sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
 }
+EXPORT_SYMBOL(sba_unmap_sg_attrs);
 
 /**************************************************************
 *
@@ -2166,10 +2180,6 @@ sba_page_override(char *str)
 __setup("sbapagesize=",sba_page_override);
 
 EXPORT_SYMBOL(sba_dma_mapping_error);
-EXPORT_SYMBOL(sba_map_single);
-EXPORT_SYMBOL(sba_unmap_single);
-EXPORT_SYMBOL(sba_map_sg);
-EXPORT_SYMBOL(sba_unmap_sg);
 EXPORT_SYMBOL(sba_dma_supported);
 EXPORT_SYMBOL(sba_alloc_coherent);
 EXPORT_SYMBOL(sba_free_coherent);
+60 −21
@@ -10,6 +10,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/dma-attrs.h>
 #include <asm/dma.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/pcibus_provider_defs.h>
@@ -149,11 +150,12 @@ void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 EXPORT_SYMBOL(sn_dma_free_coherent);
 
 /**
- * sn_dma_map_single - map a single page for DMA
+ * sn_dma_map_single_attrs - map a single page for DMA
  * @dev: device to map for
 * @cpu_addr: kernel virtual address of the region to map
  * @size: size of the region
  * @direction: DMA direction
+ * @attrs: optional dma attributes
 *
  * Map the region pointed to by @cpu_addr for DMA and return the
  * DMA address.
@@ -163,42 +165,59 @@ EXPORT_SYMBOL(sn_dma_free_coherent);
  * no way of saving the dmamap handle from the alloc to later free
  * (which is pretty much unacceptable).
  *
+ * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
+ * dma_map_consistent() so that writes force a flush of pending DMA.
+ * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
+ * Document Number: 007-4763-001)
+ *
 * TODO: simplify our interface;
  *       figure out how to save dmamap handle so can use two step.
  */
-dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-			     int direction)
+dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
+				   size_t size, int direction,
+				   struct dma_attrs *attrs)
 {
 	dma_addr_t dma_addr;
 	unsigned long phys_addr;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+	int dmabarr;
+
+	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
 	BUG_ON(dev->bus != &pci_bus_type);
 
 	phys_addr = __pa(cpu_addr);
-	dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
+	if (dmabarr)
+		dma_addr = provider->dma_map_consistent(pdev, phys_addr,
+							size, SN_DMA_ADDR_PHYS);
+	else
+		dma_addr = provider->dma_map(pdev, phys_addr, size,
+					     SN_DMA_ADDR_PHYS);
+
 	if (!dma_addr) {
 		printk(KERN_ERR "%s: out of ATEs\n", __func__);
 		return 0;
 	}
 	return dma_addr;
 }
-EXPORT_SYMBOL(sn_dma_map_single);
+EXPORT_SYMBOL(sn_dma_map_single_attrs);
 
 /**
- * sn_dma_unmap_single - unamp a DMA mapped page
+ * sn_dma_unmap_single_attrs - unamp a DMA mapped page
  * @dev: device to sync
  * @dma_addr: DMA address to sync
  * @size: size of region
  * @direction: DMA direction
+ * @attrs: optional dma attributes
 *
  * This routine is supposed to sync the DMA region specified
  * by @dma_handle into the coherence domain.  On SN, we're always cache
  * coherent, so we just need to free any ATEs associated with this mapping.
  */
-void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-			 int direction)
+void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
+			       size_t size, int direction,
+			       struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -207,19 +226,21 @@ void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	provider->dma_unmap(pdev, dma_addr, direction);
 }
-EXPORT_SYMBOL(sn_dma_unmap_single);
+EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
 
 /**
- * sn_dma_unmap_sg - unmap a DMA scatterlist
+ * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist
  * @dev: device to unmap
  * @sg: scatterlist to unmap
  * @nhwentries: number of scatterlist entries
  * @direction: DMA direction
+ * @attrs: optional dma attributes
 *
  * Unmap a set of streaming mode DMA translations.
  */
-void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-		     int nhwentries, int direction)
+void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
+			   int nhwentries, int direction,
+			   struct dma_attrs *attrs)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -234,25 +255,34 @@ void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		sg->dma_length = 0;
 	}
 }
-EXPORT_SYMBOL(sn_dma_unmap_sg);
+EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
 
 /**
- * sn_dma_map_sg - map a scatterlist for DMA
+ * sn_dma_map_sg_attrs - map a scatterlist for DMA
  * @dev: device to map for
  * @sg: scatterlist to map
  * @nhwentries: number of entries
  * @direction: direction of the DMA transaction
+ * @attrs: optional dma attributes
+ *
+ * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
+ * dma_map_consistent() so that writes force a flush of pending DMA.
+ * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
+ * Document Number: 007-4763-001)
 *
  * Maps each entry of @sg for DMA.
  */
-int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
-		  int direction)
+int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+			int nhwentries, int direction, struct dma_attrs *attrs)
 {
 	unsigned long phys_addr;
 	struct scatterlist *saved_sg = sgl, *sg;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 	int i;
+	int dmabarr;
+
+	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
 	BUG_ON(dev->bus != &pci_bus_type);
 
@@ -260,11 +290,19 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
 	 * Setup a DMA address for each entry in the scatterlist.
 	 */
 	for_each_sg(sgl, sg, nhwentries, i) {
+		dma_addr_t dma_addr;
 		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
-		sg->dma_address = provider->dma_map(pdev,
-						    phys_addr, sg->length,
-						    SN_DMA_ADDR_PHYS);
+		if (dmabarr)
+			dma_addr = provider->dma_map_consistent(pdev,
+								phys_addr,
+								sg->length,
+								SN_DMA_ADDR_PHYS);
+		else
+			dma_addr = provider->dma_map(pdev, phys_addr,
+						     sg->length,
+						     SN_DMA_ADDR_PHYS);
 
+		sg->dma_address = dma_addr;
 		if (!sg->dma_address) {
 			printk(KERN_ERR "%s: out of ATEs\n", __func__);
 
@@ -272,7 +310,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
 			 * Free any successfully allocated entries.
 			 */
 			if (i > 0)
-				sn_dma_unmap_sg(dev, saved_sg, i, direction);
+				sn_dma_unmap_sg_attrs(dev, saved_sg, i,
+						      direction, attrs);
 			return 0;
 		}
 
@@ -281,7 +320,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
 
 	return nhwentries;
 }
-EXPORT_SYMBOL(sn_dma_map_sg);
+EXPORT_SYMBOL(sn_dma_map_sg_attrs);
 
 void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 				size_t size, int direction)
+24 −4
@@ -23,10 +23,30 @@ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
 {
 	dma_free_coherent(dev, size, cpu_addr, dma_handle);
 }
-#define dma_map_single		platform_dma_map_single
-#define dma_map_sg		platform_dma_map_sg
-#define dma_unmap_single	platform_dma_unmap_single
-#define dma_unmap_sg		platform_dma_unmap_sg
+#define dma_map_single_attrs	platform_dma_map_single_attrs
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+					size_t size, int dir)
+{
+	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
+}
+#define dma_map_sg_attrs	platform_dma_map_sg_attrs
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
+			     int nents, int dir)
+{
+	return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
+}
+#define dma_unmap_single_attrs	platform_dma_unmap_single_attrs
+static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
+				    size_t size, int dir)
+{
+	return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
+}
+#define dma_unmap_sg_attrs	platform_dma_unmap_sg_attrs
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
+				int nents, int dir)
+{
+	return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
+}
 #define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
 #define dma_sync_sg_for_cpu	platform_dma_sync_sg_for_cpu
 #define dma_sync_single_for_device platform_dma_sync_single_for_device
+30 −20
@@ -22,6 +22,7 @@ struct pci_bus;
 struct task_struct;
 struct pci_dev;
 struct msi_desc;
+struct dma_attrs;
 
 typedef void ia64_mv_setup_t (char **);
 typedef void ia64_mv_cpu_init_t (void);
@@ -56,6 +57,11 @@ typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist
 typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
 typedef int ia64_mv_dma_supported (struct device *, u64);
 
+typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
+typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
+typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
  * expected to follow this architected model (see Section 10.7 in the
@@ -136,10 +142,10 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 #  define platform_dma_init		ia64_mv.dma_init
 #  define platform_dma_alloc_coherent	ia64_mv.dma_alloc_coherent
 #  define platform_dma_free_coherent	ia64_mv.dma_free_coherent
-#  define platform_dma_map_single	ia64_mv.dma_map_single
-#  define platform_dma_unmap_single	ia64_mv.dma_unmap_single
-#  define platform_dma_map_sg		ia64_mv.dma_map_sg
-#  define platform_dma_unmap_sg		ia64_mv.dma_unmap_sg
+#  define platform_dma_map_single_attrs	ia64_mv.dma_map_single_attrs
+#  define platform_dma_unmap_single_attrs	ia64_mv.dma_unmap_single_attrs
+#  define platform_dma_map_sg_attrs	ia64_mv.dma_map_sg_attrs
+#  define platform_dma_unmap_sg_attrs	ia64_mv.dma_unmap_sg_attrs
 #  define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
 #  define platform_dma_sync_sg_for_cpu	ia64_mv.dma_sync_sg_for_cpu
 #  define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
@@ -190,10 +196,10 @@ struct ia64_machine_vector {
 	ia64_mv_dma_init *dma_init;
 	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
 	ia64_mv_dma_free_coherent *dma_free_coherent;
-	ia64_mv_dma_map_single *dma_map_single;
-	ia64_mv_dma_unmap_single *dma_unmap_single;
-	ia64_mv_dma_map_sg *dma_map_sg;
-	ia64_mv_dma_unmap_sg *dma_unmap_sg;
+	ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
+	ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
+	ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
+	ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
 	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
 	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
 	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
@@ -240,10 +246,10 @@ struct ia64_machine_vector {
 	platform_dma_init,			\
 	platform_dma_alloc_coherent,		\
 	platform_dma_free_coherent,		\
-	platform_dma_map_single,		\
-	platform_dma_unmap_single,		\
-	platform_dma_map_sg,			\
-	platform_dma_unmap_sg,			\
+	platform_dma_map_single_attrs,		\
+	platform_dma_unmap_single_attrs,	\
+	platform_dma_map_sg_attrs,		\
+	platform_dma_unmap_sg_attrs,		\
 	platform_dma_sync_single_for_cpu,	\
 	platform_dma_sync_sg_for_cpu,		\
 	platform_dma_sync_single_for_device,	\
@@ -292,9 +298,13 @@ extern ia64_mv_dma_init swiotlb_init;
 extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
 extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
 extern ia64_mv_dma_map_single		swiotlb_map_single;
+extern ia64_mv_dma_map_single_attrs	swiotlb_map_single_attrs;
 extern ia64_mv_dma_unmap_single		swiotlb_unmap_single;
+extern ia64_mv_dma_unmap_single_attrs	swiotlb_unmap_single_attrs;
 extern ia64_mv_dma_map_sg		swiotlb_map_sg;
+extern ia64_mv_dma_map_sg_attrs		swiotlb_map_sg_attrs;
 extern ia64_mv_dma_unmap_sg		swiotlb_unmap_sg;
+extern ia64_mv_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs;
 extern ia64_mv_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu;
 extern ia64_mv_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu;
 extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
@@ -340,17 +350,17 @@ extern ia64_mv_dma_supported swiotlb_dma_supported;
 #ifndef platform_dma_free_coherent
 # define platform_dma_free_coherent	swiotlb_free_coherent
 #endif
-#ifndef platform_dma_map_single
-# define platform_dma_map_single	swiotlb_map_single
+#ifndef platform_dma_map_single_attrs
+# define platform_dma_map_single_attrs	swiotlb_map_single_attrs
 #endif
-#ifndef platform_dma_unmap_single
-# define platform_dma_unmap_single	swiotlb_unmap_single
+#ifndef platform_dma_unmap_single_attrs
+# define platform_dma_unmap_single_attrs	swiotlb_unmap_single_attrs
 #endif
-#ifndef platform_dma_map_sg
-# define platform_dma_map_sg		swiotlb_map_sg
+#ifndef platform_dma_map_sg_attrs
+# define platform_dma_map_sg_attrs	swiotlb_map_sg_attrs
 #endif
-#ifndef platform_dma_unmap_sg
-# define platform_dma_unmap_sg		swiotlb_unmap_sg
+#ifndef platform_dma_unmap_sg_attrs
+# define platform_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs
 #endif
 #ifndef platform_dma_sync_single_for_cpu
 # define platform_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu