Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 738f2b7b authored by David S. Miller
Browse files

sparc: Convert all SBUS drivers to dma_*() interfaces.



And all the SBUS dma interfaces are deleted.

A private implementation remains inside of the 32-bit sparc port which
exists only for the sake of the implementation of dma_*().

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 944c67df
Loading
Loading
Loading
Loading
+0 −17
Original line number Diff line number Diff line
@@ -109,26 +109,9 @@ extern void sbus_set_sbus64(struct sbus_dev *, int);
extern void sbus_fill_device_irq(struct sbus_dev *);

/* These yield IOMMU mappings in consistent mode. */
extern void *sbus_alloc_consistent(struct device *, long, u32 *dma_addrp);
extern void sbus_free_consistent(struct device *, long, void *, u32);
void prom_adjust_ranges(struct linux_prom_ranges *, int,
			struct linux_prom_ranges *, int);

#define SBUS_DMA_BIDIRECTIONAL	DMA_BIDIRECTIONAL
#define SBUS_DMA_TODEVICE	DMA_TO_DEVICE
#define SBUS_DMA_FROMDEVICE	DMA_FROM_DEVICE
#define	SBUS_DMA_NONE		DMA_NONE

/* All the rest use streaming mode mappings. */
extern dma_addr_t sbus_map_single(struct device *, void *, size_t, int);
extern void sbus_unmap_single(struct device *, dma_addr_t, size_t, int);
extern int sbus_map_sg(struct device *, struct scatterlist *, int, int);
extern void sbus_unmap_sg(struct device *, struct scatterlist *, int, int);

/* Finally, allow explicit synchronization of streamable mappings. */
extern void sbus_dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t, int);
extern void sbus_dma_sync_single_for_device(struct device *, dma_addr_t, size_t, int);

/* Eric Brower (ebrower@usa.net)
 * Translate SBus interrupt levels to ino values--
 * this is used when converting sbus "interrupts" OBP
+0 −63
Original line number Diff line number Diff line
@@ -100,69 +100,6 @@ extern struct sbus_bus *sbus_root;
extern void sbus_set_sbus64(struct sbus_dev *, int);
extern void sbus_fill_device_irq(struct sbus_dev *);

/* Allocate a DMA-coherent ("consistent") buffer for an SBUS device.
 * Legacy wrapper kept for old callers; forwards to the generic DMA
 * API with GFP_ATOMIC since historical SBUS callers could not sleep.
 * Returns the CPU virtual address, with the bus address stored
 * through @dma_handle.
 */
static inline void *sbus_alloc_consistent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle)
{
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, size, dma_handle, GFP_ATOMIC);
	return cpu_addr;
}

/* Free a DMA-coherent buffer previously obtained from
 * sbus_alloc_consistent().  Legacy wrapper around the generic DMA API.
 * Note: the original wrote "return dma_free_coherent(...);" — a return
 * statement with an expression is not allowed in a void function
 * (C99 6.8.6.4), so the bogus "return" is dropped here.
 */
static inline void sbus_free_consistent(struct device *dev, size_t size,
					void *vaddr, dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, vaddr, dma_handle);
}

#define SBUS_DMA_BIDIRECTIONAL	DMA_BIDIRECTIONAL
#define SBUS_DMA_TODEVICE	DMA_TO_DEVICE
#define SBUS_DMA_FROMDEVICE	DMA_FROM_DEVICE
#define	SBUS_DMA_NONE		DMA_NONE

/* All the rest use streaming mode mappings. */
/* Map a single CPU buffer for streaming DMA on an SBUS device.
 * Legacy wrapper: the old SBUS API passed the direction as a plain
 * int, so convert it before handing off to the generic DMA API.
 */
static inline dma_addr_t sbus_map_single(struct device *dev, void *ptr,
					 size_t size, int direction)
{
	enum dma_data_direction dir = (enum dma_data_direction) direction;

	return dma_map_single(dev, ptr, size, dir);
}

/* Tear down a streaming DMA mapping created by sbus_map_single().
 * Legacy wrapper: converts the old int direction to the generic
 * enum before forwarding.
 */
static inline void sbus_unmap_single(struct device *dev,
				     dma_addr_t dma_addr, size_t size,
				     int direction)
{
	enum dma_data_direction dir = (enum dma_data_direction) direction;

	dma_unmap_single(dev, dma_addr, size, dir);
}

static inline int sbus_map_sg(struct device *dev, struct scatterlist *sg,
			      int nents, int direction)
{
	return dma_map_sg(dev, sg, nents,
			  (enum dma_data_direction) direction);
}

static inline void sbus_unmap_sg(struct device *dev, struct scatterlist *sg,
				 int nents, int direction)
{
	dma_unmap_sg(dev, sg, nents,
		     (enum dma_data_direction) direction);
}

/* Finally, allow explicit synchronization of streamable mappings. */
/* Make a streaming mapping's contents visible to the CPU after the
 * device has written to it.  Legacy wrapper converting the int
 * direction argument before calling the generic DMA API.
 */
static inline void sbus_dma_sync_single_for_cpu(struct device *dev,
						dma_addr_t dma_handle,
						size_t size, int direction)
{
	enum dma_data_direction dir = (enum dma_data_direction) direction;

	dma_sync_single_for_cpu(dev, dma_handle, size, dir);
}

/* Counterpart of sbus_dma_sync_single_for_cpu() for the device
 * direction.  Intentionally a no-op: per the note below, CPU writes
 * need no explicit flushing to become visible to the device here.
 * NOTE(review): presumably a property of this port's DMA/cache setup —
 * confirm against the arch dma_sync_single_for_device() implementation.
 */
static inline void sbus_dma_sync_single_for_device(struct device *dev,
						   dma_addr_t dma_handle,
						   size_t size, int direction)
{
	/* No flushing needed to sync cpu writes to the device.  */
}

extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *);
extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *);
extern void sbus_setup_arch_props(struct sbus_bus *, struct device_node *);
+0 −8
Original line number Diff line number Diff line
@@ -155,14 +155,6 @@ EXPORT_SYMBOL(BTFIXUP_CALL(pgprot_noncached));
#ifdef CONFIG_SBUS
EXPORT_SYMBOL(sbus_root);
EXPORT_SYMBOL(sbus_set_sbus64);
EXPORT_SYMBOL(sbus_alloc_consistent);
EXPORT_SYMBOL(sbus_free_consistent);
EXPORT_SYMBOL(sbus_map_single);
EXPORT_SYMBOL(sbus_unmap_single);
EXPORT_SYMBOL(sbus_map_sg);
EXPORT_SYMBOL(sbus_unmap_sg);
EXPORT_SYMBOL(sbus_dma_sync_single_for_cpu);
EXPORT_SYMBOL(sbus_dma_sync_single_for_device);
EXPORT_SYMBOL(sbus_iounmap);
EXPORT_SYMBOL(sbus_ioremap);
#endif
+0 −8
Original line number Diff line number Diff line
@@ -162,14 +162,6 @@ EXPORT_SYMBOL(auxio_set_lte);
#ifdef CONFIG_SBUS
EXPORT_SYMBOL(sbus_root);
EXPORT_SYMBOL(sbus_set_sbus64);
EXPORT_SYMBOL(sbus_alloc_consistent);
EXPORT_SYMBOL(sbus_free_consistent);
EXPORT_SYMBOL(sbus_map_single);
EXPORT_SYMBOL(sbus_unmap_single);
EXPORT_SYMBOL(sbus_map_sg);
EXPORT_SYMBOL(sbus_unmap_sg);
EXPORT_SYMBOL(sbus_dma_sync_single_for_cpu);
EXPORT_SYMBOL(sbus_dma_sync_single_for_device);
#endif
EXPORT_SYMBOL(outsb);
EXPORT_SYMBOL(outsw);
+8 −8
Original line number Diff line number Diff line
@@ -680,7 +680,7 @@ fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int d
{
    struct sbus_dev *sdev = fore200e->bus_dev;
    struct device *dev = &sdev->ofdev.dev;
    u32 dma_addr = sbus_map_single(dev, virt_addr, size, direction);
    u32 dma_addr = dma_map_single(dev, virt_addr, size, direction);

    DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
	    virt_addr, size, direction, dma_addr);
@@ -698,7 +698,7 @@ fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int di
    DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
	    dma_addr, size, direction);

    sbus_unmap_single(dev, dma_addr, size, direction);
    dma_unmap_single(dev, dma_addr, size, direction);
}


@@ -710,7 +710,7 @@ fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size,

    DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
    
    sbus_dma_sync_single_for_cpu(dev, dma_addr, size, direction);
    dma_sync_single_for_cpu(dev, dma_addr, size, direction);
}

static void
@@ -721,7 +721,7 @@ fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int si

    DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    sbus_dma_sync_single_for_device(dev, dma_addr, size, direction);
    dma_sync_single_for_device(dev, dma_addr, size, direction);
}


@@ -738,8 +738,8 @@ fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
    chunk->alloc_size = chunk->align_size = size * nbr;

    /* returned chunks are page-aligned */
    chunk->alloc_addr = sbus_alloc_consistent(dev, chunk->alloc_size,
					      &chunk->dma_addr);
    chunk->alloc_addr = dma_alloc_coherent(dev, chunk->alloc_size,
					   &chunk->dma_addr, GFP_ATOMIC);

    if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
	return -ENOMEM;
@@ -758,7 +758,7 @@ fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
    struct sbus_dev *sdev = (struct sbus_dev *) fore200e->bus_dev;
    struct device *dev = &sdev->ofdev.dev;

    sbus_free_consistent(dev, chunk->alloc_size,
    dma_free_coherent(dev, chunk->alloc_size,
		      chunk->alloc_addr, chunk->dma_addr);
}

Loading