Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ee664a92 authored by FUJITA Tomonori's avatar FUJITA Tomonori Committed by Ingo Molnar
Browse files

sparc: Use asm-generic/pci-dma-compat



This converts SPARC to use asm-generic/pci-dma-compat instead
of the homegrown mechanism.

SPARC32 has two dma_map_ops structures for pci and sbus
(removing arch/sparc/kernel/dma.c, PCI and SBUS DMA accessor).
The global 'dma_ops' is set to sbus_dma_ops and get_dma_ops()
returns pci32_dma_ops for pci devices so we can use the
appropriate dma mapping operations.

Signed-off-by: default avatarFUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Tested-by: default avatarRobert Reif <reif@earthlink.net>
Acked-by: default avatarDavid S. Miller <davem@davemloft.net>
Cc: tony.luck@intel.com
Cc: fenghua.yu@intel.com
LKML-Reference: <1249872797-1314-8-git-send-email-fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
parent c2c07dbd
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
@@ -14,10 +14,15 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern struct dma_map_ops *dma_ops;
extern struct dma_map_ops *dma_ops, pci32_dma_ops;
extern struct bus_type pci_bus_type;

/* Return the dma_map_ops to use for @dev.  On 32-bit SPARC with PCI
 * enabled, PCI devices are routed to pci32_dma_ops; every other device
 * falls back to the global dma_ops (per the commit message, set to the
 * SBUS ops on sparc32).
 */
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
	if (dev->bus == &pci_bus_type)
		return &pci32_dma_ops;
#endif
	return dma_ops;
}

+3 −0
Original line number Diff line number Diff line
@@ -5,4 +5,7 @@
#else
#include <asm/pci_32.h>
#endif

#include <asm-generic/pci-dma-compat.h>

#endif
+0 −105
Original line number Diff line number Diff line
@@ -31,42 +31,8 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
 */
#define PCI_DMA_BUS_IS_PHYS	(0)

#include <asm/scatterlist.h>

struct pci_dev;

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be valid struct pci_dev pointer for PCI devices.
 */
extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what as passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);

/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
 */
extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);

/* Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided for in a previous pci_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);

/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
@@ -81,69 +47,6 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
	(((PTR)->LEN_NAME) = (VAL))

/*
 * Same as above, only with pages instead of mapped addresses.
 */
extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
			unsigned long offset, size_t size, int direction);
extern void pci_unmap_page(struct pci_dev *hwdev,
			dma_addr_t dma_address, size_t size, int direction);

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction);

/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_for_device, and then the device
 * again owns the buffer.
 */
extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);

/* Return whether the given PCI device DMA address mask can
 * be supported properly.  For example, if your device can
 * only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
/* sparc32: every PCI DMA mask is considered supportable, so this
 * unconditionally reports success.
 */
static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
	return 1;
}

#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
@@ -154,14 +57,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
}
#endif

#define PCI_DMA_ERROR_CODE      (~(dma_addr_t)0x0)

/* A mapping failed iff the returned handle equals the all-ones sentinel
 * PCI_DMA_ERROR_CODE defined just above.
 */
static inline int pci_dma_mapping_error(struct pci_dev *pdev,
					dma_addr_t dma_addr)
{
        return (dma_addr == PCI_DMA_ERROR_CODE);
}

struct device_node;
extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);

+0 −88
Original line number Diff line number Diff line
@@ -35,37 +35,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
 */
#define PCI_DMA_BUS_IS_PHYS	(0)

/* Legacy PCI wrapper: allocate a coherent buffer via the generic DMA API
 * on the underlying struct device.  GFP_ATOMIC matches the historical
 * pci_alloc_consistent contract (callable from atomic context).
 */
static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
					 dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
}

/* Legacy PCI wrapper: release a buffer obtained from pci_alloc_consistent
 * by forwarding to the generic dma_free_coherent.
 */
static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
				       void *vaddr, dma_addr_t dma_handle)
{
	return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
}

/* Legacy PCI wrapper: map a single streaming-mode buffer, converting the
 * legacy int direction to enum dma_data_direction for the generic API.
 */
static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
					size_t size, int direction)
{
	return dma_map_single(&pdev->dev, ptr, size,
			      (enum dma_data_direction) direction);
}

/* Legacy PCI wrapper: undo a pci_map_single translation; dma_addr, size
 * and direction must match the original mapping call.
 */
static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
				    size_t size, int direction)
{
	dma_unmap_single(&pdev->dev, dma_addr, size,
			 (enum dma_data_direction) direction);
}

#define pci_map_page(dev, page, off, size, dir) \
	pci_map_single(dev, (page_address(page) + (off)), size, dir)
#define pci_unmap_page(dev,addr,sz,dir) \
	pci_unmap_single(dev,addr,sz,dir)

/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
@@ -80,57 +49,6 @@ static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
	(((PTR)->LEN_NAME) = (VAL))

/* Legacy PCI wrapper: map a scatterlist for streaming DMA; returns the
 * number of DMA address/length pairs actually used (<= nents).
 */
static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
			     int nents, int direction)
{
	return dma_map_sg(&pdev->dev, sg, nents,
			  (enum dma_data_direction) direction);
}

/* Legacy PCI wrapper: tear down a scatterlist mapping made by pci_map_sg. */
static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
				int nents, int direction)
{
	dma_unmap_sg(&pdev->dev, sg, nents,
		     (enum dma_data_direction) direction);
}

/* Legacy PCI wrapper: make a single streaming mapping visible to the CPU
 * after device DMA, via the generic sync-for-cpu hook.
 */
static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
					       dma_addr_t dma_handle,
					       size_t size, int direction)
{
	dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
				(enum dma_data_direction) direction);
}

/* Intentional no-op on this architecture: CPU writes are already visible
 * to the device without any explicit flush (see comment in the body).
 */
static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
						  dma_addr_t dma_handle,
						  size_t size, int direction)
{
	/* No flushing needed to sync cpu writes to the device.  */
}

/* Legacy PCI wrapper: scatterlist counterpart of
 * pci_dma_sync_single_for_cpu; same ownership rules apply.
 */
static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
					   struct scatterlist *sg,
					   int nents, int direction)
{
	dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
			    (enum dma_data_direction) direction);
}

/* Intentional no-op, matching pci_dma_sync_single_for_device above:
 * no flush is required for CPU-to-device visibility here.
 */
static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
					      struct scatterlist *sg,
					      int nelems, int direction)
{
	/* No flushing needed to sync cpu writes to the device.  */
}

/* Return whether the given PCI device DMA address mask can
 * be supported properly.  For example, if your device can
 * only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);

/* PCI IOMMU mapping bypass support. */

/* PCI 64-bit addressing works for all slots on all controller
@@ -140,12 +58,6 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
#define PCI64_REQUIRED_MASK	(~(dma64_addr_t)0)
#define PCI64_ADDR_BASE		0xfffc000000000000UL

/* Legacy PCI wrapper: defer mapping-error detection to the generic
 * dma_mapping_error for the underlying device (contrast the sparc32
 * variant, which compares against a fixed sentinel).
 */
static inline int pci_dma_mapping_error(struct pci_dev *pdev,
					dma_addr_t dma_addr)
{
	return dma_mapping_error(&pdev->dev, dma_addr);
}

#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
+7 −148
Original line number Diff line number Diff line
@@ -13,13 +13,17 @@
#include <linux/pci.h>
#endif

#include "dma.h"

/*
 * Return whether the given PCI device DMA address mask can be
 * supported properly.  For example, if your device can only drive the
 * low 24-bits during PCI bus mastering, then you would pass
 * 0x00ffffff as the mask to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		/* NOTE(review): the diff rendering stacks the removed and
		 * added line here — only ONE of the two returns below exists
		 * in either version of the file (the old code called
		 * pci_dma_supported(); the new code returns 1 directly). */
		return pci_dma_supported(to_pci_dev(dev), mask);
		return 1;
#endif
	return 0;
}
@@ -34,148 +38,3 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(dma_set_mask);

/* dma_map_ops .alloc_coherent: dispatch to the PCI consistent allocator
 * for PCI devices, otherwise to the SBUS allocator.
 */
static void *dma32_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
#endif
	return sbus_alloc_consistent(dev, size, dma_handle);
}

/* dma_map_ops .free_coherent: release via the PCI path for PCI devices,
 * otherwise via the SBUS path.
 */
static void dma32_free_coherent(struct device *dev, size_t size,
				void *cpu_addr, dma_addr_t dma_handle)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type) {
		pci_free_consistent(to_pci_dev(dev), size,
				    cpu_addr, dma_handle);
		return;
	}
#endif
	sbus_free_consistent(dev, size, cpu_addr, dma_handle);
}

/* dma_map_ops .map_page: PCI devices go through pci_map_page, everything
 * else through sbus_map_page; the legacy helpers take an int direction,
 * hence the casts.
 */
static dma_addr_t dma32_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_map_page(to_pci_dev(dev), page, offset,
				    size, (int)direction);
#endif
	return sbus_map_page(dev, page, offset, size, (int)direction);
}

/* dma_map_ops .unmap_page: inverse of dma32_map_page, dispatching on the
 * same PCI-vs-SBUS bus test.
 */
static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address,
			     size_t size, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type) {
		pci_unmap_page(to_pci_dev(dev), dma_address,
			       size, (int)direction);
		return;
	}
#endif
	sbus_unmap_page(dev, dma_address, size, (int)direction);
}

/* dma_map_ops .map_sg: PCI path takes the legacy int direction; the SBUS
 * path is passed the enum directly.  Returns entries actually mapped.
 */
static int dma32_map_sg(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction direction,
			struct dma_attrs *attrs)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
#endif
	return sbus_map_sg(dev, sg, nents, direction);
}

/* dma_map_ops .unmap_sg: inverse of dma32_map_sg.
 * NOTE(review): not declared static in this rendering, unlike its
 * siblings — presumably an oversight in the original file; confirm
 * against the repository.
 */
void dma32_unmap_sg(struct device *dev, struct scatterlist *sg,
		    int nents, enum dma_data_direction direction,
		    struct dma_attrs *attrs)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type) {
		pci_unmap_sg(to_pci_dev(dev), sg, nents, (int)direction);
		return;
	}
#endif
	sbus_unmap_sg(dev, sg, nents, (int)direction);
}

/* dma_map_ops .sync_single_for_cpu: hand CPU ownership of a single
 * streaming mapping back after device DMA, via the bus-appropriate helper.
 */
static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				      size_t size,
				      enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type) {
		pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
					    size, (int)direction);
		return;
	}
#endif
	sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction);
}

/* dma_map_ops .sync_single_for_device: counterpart of the sync-for-cpu
 * hook, returning ownership of the mapping to the device.
 */
static void dma32_sync_single_for_device(struct device *dev,
					 dma_addr_t dma_handle, size_t size,
					 enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type) {
		pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
					       size, (int)direction);
		return;
	}
#endif
	sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction);
}

/* dma_map_ops .sync_sg_for_cpu: only the PCI path is implemented;
 * reaching the fall-through for a non-PCI device is treated as a kernel
 * bug (no SBUS sg-sync helper exists here).
 */
static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				  int nelems, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type) {
		pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg,
					nelems, (int)direction);
		return;
	}
#endif
	BUG();
}

/* dma_map_ops .sync_sg_for_device: as with the for-cpu variant, only PCI
 * is supported; a non-PCI caller hits BUG().
 */
static void dma32_sync_sg_for_device(struct device *dev,
				     struct scatterlist *sg, int nelems,
				     enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type) {
		pci_dma_sync_sg_for_device(to_pci_dev(dev), sg,
					   nelems, (int)direction);
		return;
	}
#endif
	BUG();
}

/* Homegrown sparc32 dma_map_ops table (removed by this commit): every
 * hook dispatches per-device between the PCI and SBUS paths above.
 */
static struct dma_map_ops dma32_dma_ops = {
	.alloc_coherent		= dma32_alloc_coherent,
	.free_coherent		= dma32_free_coherent,
	.map_page		= dma32_map_page,
	.unmap_page		= dma32_unmap_page,
	.map_sg			= dma32_map_sg,
	.unmap_sg		= dma32_unmap_sg,
	.sync_single_for_cpu	= dma32_sync_single_for_cpu,
	.sync_single_for_device	= dma32_sync_single_for_device,
	.sync_sg_for_cpu	= dma32_sync_sg_for_cpu,
	.sync_sg_for_device	= dma32_sync_sg_for_device,
};

/* Global dma_ops default, exported for use by get_dma_ops(). */
struct dma_map_ops *dma_ops = &dma32_dma_ops;
EXPORT_SYMBOL(dma_ops);
Loading