
Commit 0e6850f6 authored by Paul Mackerras

Merge branch 'mymerge' of ssh://ozlabs.org/home/sfr/kernel-sfr

parents e5356640 78b09735
+93 −45
 /*
- * This is based on both include/asm-sh/dma-mapping.h and
- * include/asm-ppc/pci.h
+ * Copyright (C) 2004 IBM
+ *
+ * Implements the generic device dma API for powerpc.
+ * the pci and vio busses
  */
-#ifndef __ASM_PPC_DMA_MAPPING_H
-#define __ASM_PPC_DMA_MAPPING_H
+#ifndef _ASM_DMA_MAPPING_H
+#define _ASM_DMA_MAPPING_H
 
 #include <linux/config.h>
+#include <linux/types.h>
+#include <linux/cache.h>
 /* need struct page definitions */
 #include <linux/mm.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
+#include <asm/bug.h>
+
+#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
 /*
@@ -24,22 +31,12 @@ extern void __dma_free_coherent(size_t size, void *vaddr);
 extern void __dma_sync(void *vaddr, size_t size, int direction);
 extern void __dma_sync_page(struct page *page, unsigned long offset,
 				 size_t size, int direction);
-#define dma_cache_inv(_start,_size) \
-	invalidate_dcache_range(_start, (_start + _size))
-#define dma_cache_wback(_start,_size) \
-	clean_dcache_range(_start, (_start + _size))
-#define dma_cache_wback_inv(_start,_size) \
-	flush_dcache_range(_start, (_start + _size))
 
 #else /* ! CONFIG_NOT_COHERENT_CACHE */
 /*
  * Cache coherent cores.
  */
 
-#define dma_cache_inv(_start,_size)		do { } while (0)
-#define dma_cache_wback(_start,_size)		do { } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { } while (0)
-
 #define __dma_alloc_coherent(gfp, size, handle)	NULL
 #define __dma_free_coherent(size, addr)		do { } while (0)
 #define __dma_sync(addr, size, rw)		do { } while (0)
@@ -47,6 +44,30 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
 
 #endif /* ! CONFIG_NOT_COHERENT_CACHE */
 
+#ifdef CONFIG_PPC64
+
+extern int dma_supported(struct device *dev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern void *dma_alloc_coherent(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flag);
+extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t dma_handle);
+extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+		size_t size, enum dma_data_direction direction);
+extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+		size_t size, enum dma_data_direction direction);
+extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size,
+		enum dma_data_direction direction);
+extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+		size_t size, enum dma_data_direction direction);
+extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction direction);
+extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+		int nhwentries, enum dma_data_direction direction);
+
+#else /* CONFIG_PPC64 */
+
 #define dma_supported(dev, mask)	(1)
 
 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
@@ -144,28 +165,26 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 /* We don't do anything here. */
 #define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)
 
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			size_t size,
+#endif /* CONFIG_PPC64 */
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
-
 	__dma_sync(bus_to_virt(dma_handle), size, direction);
 }
 
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-			size_t size,
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
-
 	__dma_sync(bus_to_virt(dma_handle), size, direction);
 }
 
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sg, int nents,
 		enum dma_data_direction direction)
 {
 	int i;
@@ -176,8 +195,8 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }
 
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
+static inline void dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sg, int nents,
 		enum dma_data_direction direction)
 {
 	int i;
@@ -188,6 +207,15 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }
 
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+#ifdef CONFIG_PPC64
+	return (dma_addr == DMA_ERROR_CODE);
+#else
+	return 0;
+#endif
+}
+
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #ifdef CONFIG_NOT_COHERENT_CACHE
@@ -198,25 +226,29 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 
 static inline int dma_get_cache_alignment(void)
 {
+#ifdef CONFIG_PPC64
+	/* no easy way to get cache size on all processors, so return
+	 * the maximum possible, to be safe */
+	return (1 << L1_CACHE_SHIFT_MAX);
+#else
 	/*
 	 * Each processor family will define its own L1_CACHE_SHIFT,
 	 * L1_CACHE_BYTES wraps to this, so this is always safe.
 	 */
 	return L1_CACHE_BYTES;
+#endif
 }
 
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-		      	      unsigned long offset, size_t size,
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
 		enum dma_data_direction direction)
 {
 	/* just sync everything for now */
 	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
 }
 
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-		    		 unsigned long offset, size_t size,
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
 		enum dma_data_direction direction)
 {
 	/* just sync everything for now */
@@ -226,12 +258,28 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 static inline void dma_cache_sync(void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
+	BUG_ON(direction == DMA_NONE);
 	__dma_sync(vaddr, size, (int)direction);
 }
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-#endif				/* __ASM_PPC_DMA_MAPPING_H */
+/*
+ * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
+ */
+struct dma_mapping_ops {
+	void *		(*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag);
+	void		(*free_coherent)(struct device *dev, size_t size,
+				void *vaddr, dma_addr_t dma_handle);
+	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
+				size_t size, enum dma_data_direction direction);
+	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
+				size_t size, enum dma_data_direction direction);
+	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction);
+	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction);
+	int		(*dma_supported)(struct device *dev, u64 mask);
+	int		(*dac_dma_supported)(struct device *dev, u64 mask);
+};
+
+#endif	/* _ASM_DMA_MAPPING_H */
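
A quick illustration of the PPC64 arrangement above: the dma_* entry points become extern functions that are expected to dispatch through a per-bus struct dma_mapping_ops (PCI vs. VIO, G5 vs. i/pSeries). The sketch below shows only that dispatch shape; every example_* name and the selection logic are hypothetical and not part of this commit.

/* Illustrative sketch of the PPC64 dma_mapping_ops dispatch.  Only
 * struct dma_mapping_ops and the dma_* prototypes come from the header
 * above; all example_* names are hypothetical. */
#include <linux/device.h>
#include <linux/dma-mapping.h>

extern struct dma_mapping_ops example_pci_dma_ops;	/* hypothetical PCI ops table */
extern struct dma_mapping_ops example_vio_dma_ops;	/* hypothetical VIO ops table */

/* Pick an ops table for the device; real code would key this off the
 * bus the device sits on (PCI vs. VIO). */
static struct dma_mapping_ops *example_get_dma_ops(struct device *dev)
{
	return &example_pci_dma_ops;	/* assume a PCI device for the sketch */
}

void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	return example_get_dma_ops(dev)->alloc_coherent(dev, size,
			dma_handle, flag);
}

dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
		enum dma_data_direction direction)
{
	return example_get_dma_ops(dev)->map_single(dev, cpu_addr, size,
			direction);
}
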
+17 −0
@@ -545,6 +545,23 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
 #include <asm/mpc8260_pci9.h>
 #endif
 
+#ifdef CONFIG_NOT_COHERENT_CACHE
+
+#define dma_cache_inv(_start,_size) \
+	invalidate_dcache_range(_start, (_start + _size))
+#define dma_cache_wback(_start,_size) \
+	clean_dcache_range(_start, (_start + _size))
+#define dma_cache_wback_inv(_start,_size) \
+	flush_dcache_range(_start, (_start + _size))
+
+#else
+
+#define dma_cache_inv(_start,_size)		do { } while (0)
+#define dma_cache_wback(_start,_size)		do { } while (0)
+#define dma_cache_wback_inv(_start,_size)	do { } while (0)
+
+#endif
+
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access

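For reference on the dma_cache_* helpers moved into this ppc header: on CONFIG_NOT_COHERENT_CACHE parts they expand to dcache range maintenance, and on coherent parts they compile away. A purely illustrative driver-side use follows; the example_* names are hypothetical and not part of this commit.

/* Illustrative only: cache maintenance around a device DMA on a
 * non-coherent platform, using the macros added above. */
#include <linux/types.h>
#include <asm/io.h>

static void example_sync_for_device(void *buf, size_t len)
{
	/* write dirty lines back and discard them, so the device reads
	 * current data and the CPU refetches the buffer after the DMA */
	dma_cache_wback_inv((unsigned long)buf, len);
}

static void example_sync_for_cpu(void *buf, size_t len)
{
	/* the device wrote into buf: drop any stale cached copies */
	dma_cache_inv((unsigned long)buf, len);
}
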
include/asm-ppc64/dma-mapping.h

deleted 100644 → 0
+0 −136
/* Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for ppc64. Handles
 * the pci and vio busses
 */

#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag);
extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle);
extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction direction);
extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction direction);
extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction);
extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
		size_t size, enum dma_data_direction direction);
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction direction);
extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nhwentries, enum dma_data_direction direction);

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return (dma_addr == DMA_ERROR_CODE);
}

/* Now for the API extensions over the pci_ one */

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d)	(1)

static inline int
dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << L1_CACHE_SHIFT_MAX);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
				size_t size, enum dma_data_direction direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction direction);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*dac_dma_supported)(struct device *dev, u64 mask);
};

#endif	/* _ASM_DMA_MAPPING_H */