include/asm-ppc/dma-mapping.h → include/asm-powerpc/dma-mapping.h  (+93 −35)

 /*
- * This is based on both include/asm-sh/dma-mapping.h and
- * include/asm-ppc/pci.h
+ * Copyright (C) 2004 IBM
+ *
+ * Implements the generic device dma API for powerpc.
+ * the pci and vio busses
  */
-#ifndef __ASM_PPC_DMA_MAPPING_H
-#define __ASM_PPC_DMA_MAPPING_H
+#ifndef _ASM_DMA_MAPPING_H
+#define _ASM_DMA_MAPPING_H

 #include <linux/config.h>
 #include <linux/types.h>
 #include <linux/cache.h>
 /* need struct page definitions */
 #include <linux/mm.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
+#include <asm/bug.h>
+
+#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

 #ifdef CONFIG_NOT_COHERENT_CACHE
 /*

@@ -37,6 +44,30 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,

 #endif /* ! CONFIG_NOT_COHERENT_CACHE */

+#ifdef CONFIG_PPC64
+extern int dma_supported(struct device *dev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern void *dma_alloc_coherent(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flag);
+extern void dma_free_coherent(struct device *dev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle);
+extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+		size_t size, enum dma_data_direction direction);
+extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+		size_t size, enum dma_data_direction direction);
+extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size,
+		enum dma_data_direction direction);
+extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+		size_t size, enum dma_data_direction direction);
+extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction direction);
+extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+		int nhwentries, enum dma_data_direction direction);
+#else /* CONFIG_PPC64 */
+
 #define dma_supported(dev, mask)	(1)

 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
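On ppc64 these entry points are real functions (dispatched through the per-bus dma_mapping_ops defined at the end of this file), while on ppc32 they stay inline; a driver calls them the same way either side. A minimal sketch of the calling convention follows — mydev_prepare_rx, dev, buf and len are hypothetical driver names, only the dma_* calls and DMA_ERROR_CODE come from this header:

/* Sketch only: map one buffer for device->memory DMA and check it.
 * dma_mapping_error() takes just the handle in this kernel era. */
static int mydev_prepare_rx(struct device *dev, void *buf, size_t len,
		dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(*handle))	/* == DMA_ERROR_CODE on ppc64, never set on ppc32 */
		return -ENOMEM;
	return 0;
	/* the caller later undoes this with:
	 * dma_unmap_single(dev, *handle, len, DMA_FROM_DEVICE); */
}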
@@ -134,28 +165,26 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 /* We don't do anything here. */
 #define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+#endif /* CONFIG_PPC64 */
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
 	__dma_sync(bus_to_virt(dma_handle), size, direction);
 }

-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
 	__dma_sync(bus_to_virt(dma_handle), size, direction);
 }

-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sg, int nents,
 		enum dma_data_direction direction)
 {
 	int i;

@@ -166,8 +195,8 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }

-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
+static inline void dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sg, int nents,
 		enum dma_data_direction direction)
 {
 	int i;

@@ -178,6 +207,15 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }

+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+#ifdef CONFIG_PPC64
+	return (dma_addr == DMA_ERROR_CODE);
+#else
+	return 0;
+#endif
+}
+
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

 #ifdef CONFIG_NOT_COHERENT_CACHE

@@ -188,25 +226,29 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 static inline int dma_get_cache_alignment(void)
 {
+#ifdef CONFIG_PPC64
+	/* no easy way to get cache size on all processors, so return
+	 * the maximum possible, to be safe */
+	return (1 << L1_CACHE_SHIFT_MAX);
+#else
 	/*
 	 * Each processor family will define its own L1_CACHE_SHIFT,
 	 * L1_CACHE_BYTES wraps to this, so this is always safe.
 	 */
 	return L1_CACHE_BYTES;
+#endif
 }

-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-		unsigned long offset, size_t size,
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
 		enum dma_data_direction direction)
 {
 	/* just sync everything for now */
 	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
 }

-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-		unsigned long offset, size_t size,
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
 		enum dma_data_direction direction)
 {
 	/* just sync everything for now */

@@ -216,12 +258,28 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 static inline void dma_cache_sync(void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
 	__dma_sync(vaddr, size, (int)direction);
 }

-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-#endif /* __ASM_PPC_DMA_MAPPING_H */
+/*
+ * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
+ */
+struct dma_mapping_ops {
+	void *		(*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag);
+	void		(*free_coherent)(struct device *dev, size_t size,
+				void *vaddr, dma_addr_t dma_handle);
+	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
+				size_t size, enum dma_data_direction direction);
+	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
+				size_t size, enum dma_data_direction direction);
+	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction);
+	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction);
+	int		(*dma_supported)(struct device *dev, u64 mask);
+	int		(*dac_dma_supported)(struct device *dev, u64 mask);
+};
+
+#endif /* _ASM_DMA_MAPPING_H */
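The ops table is what lets one set of dma_* entry points serve different bus implementations at run time. A sketch of how a backend might populate it — the stub_* names are hypothetical placeholders, not functions from this patch; the real implementations live in the platform's iommu and vio code:

/* Sketch: a bus backend filling in struct dma_mapping_ops (types as
 * declared in the header above).  Stubs stand in for real iommu code. */
static void *stub_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	return NULL;	/* a real backend allocates and programs the iommu */
}

static int stub_dma_supported(struct device *dev, u64 mask)
{
	return 1;	/* a real backend checks mask against bus reach */
}

static struct dma_mapping_ops stub_dma_ops = {
	.alloc_coherent	= stub_alloc_coherent,
	.dma_supported	= stub_dma_supported,
	/* .free_coherent, .map_single, .map_sg, ... filled in the same way */
};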
include/asm-ppc64/dma-mapping.h  deleted (100644 → 0, +0 −136)

/* Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for ppc64. Handles
 * the pci and vio busses
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag);
extern void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle);
extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction direction);
extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction direction);
extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction);
extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
		size_t size, enum dma_data_direction direction);
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction direction);
extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nhwentries, enum dma_data_direction direction);

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return (dma_addr == DMA_ERROR_CODE);
}

/* Now for the API extensions over the pci_ one */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d)	(1)

static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << L1_CACHE_SHIFT_MAX);
}
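dma_get_cache_alignment() is what a driver uses to keep separately-synced DMA objects out of the same cache line. A hypothetical use, assuming the usual kmalloc/ALIGN helpers — alloc_desc_ring and its parameters are illustrative names, not from this patch:

/* Sketch: pad the per-descriptor stride to a cache-line multiple so a
 * sync on one descriptor cannot clobber its neighbour's data. */
static void *alloc_desc_ring(int ndesc, size_t desc_size, size_t *stride)
{
	*stride = ALIGN(desc_size, dma_get_cache_alignment());
	return kmalloc(*stride * ndesc, GFP_KERNEL);
}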
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
		unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
		unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline void dma_cache_sync(void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
				size_t size, enum dma_data_direction direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction direction);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*dac_dma_supported)(struct device *dev, u64 mask);
};

#endif /* _ASM_DMA_MAPPING_H */
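The contrast between the two files shows up in these sync hooks: the ppc64 versions are no-ops because that hardware keeps caches coherent with DMA, while the merged header's ppc32 path flushes via __dma_sync()/__dma_sync_page() under CONFIG_NOT_COHERENT_CACHE. Portable drivers call them unconditionally, as in this hypothetical receive-completion path (mydev_rx_complete and process_packet are illustrative names):

/* Sketch: the canonical streaming-DMA ownership handoff.  On coherent
 * ppc64 each sync reduces to the BUG_ON check; on noncoherent ppc32 it
 * invalidates/flushes the cache lines backing the buffer. */
static void mydev_rx_complete(struct device *dev, dma_addr_t handle,
		void *buf, size_t len)
{
	/* device finished writing: make its data visible to the CPU */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	process_packet(buf, len);	/* hypothetical consumer */

	/* hand the buffer back to the device for the next transfer */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}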