
Commit f32154c9 authored by Paul Mundt

sh: Add dma-mapping support for dma_alloc/free_coherent() overrides.



This moves the current dma_alloc/free_coherent() calls to a generic
variant and plugs them in for the nommu default. Other variants can
override the defaults in the dma mapping ops directly.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 73c926be
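
With this change, dma_alloc_coherent()/dma_free_coherent() become thin inline wrappers around get_dma_ops(dev), so a platform that needs its own coherent allocator only has to populate the .alloc_coherent/.free_coherent hooks in its dma_map_ops. A minimal sketch of such an override follows; it is not part of this commit, and the myplat_* names are hypothetical.

/*
 * Hypothetical platform variant (not part of this commit): override the
 * coherent allocation hooks in dma_map_ops while reusing the generic
 * helpers.  All "myplat_*" names are made up for illustration.
 */
#include <linux/dma-mapping.h>

static void *myplat_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t gfp)
{
	/* A real variant would carve memory out of a board-specific
	 * uncached region here; this sketch reuses the generic path. */
	return dma_generic_alloc_coherent(dev, size, dma_handle, gfp);
}

static void myplat_free_coherent(struct device *dev, size_t size,
				 void *vaddr, dma_addr_t dma_handle)
{
	dma_generic_free_coherent(dev, size, vaddr, dma_handle);
}

struct dma_map_ops myplat_dma_ops = {
	.alloc_coherent	= myplat_alloc_coherent,
	.free_coherent	= myplat_free_coherent,
	/* .map_page, .map_sg, .sync_* filled in as in nommu_dma_ops */
};
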
arch/sh/include/asm/dma-mapping.h +40 −8
@@ -9,6 +9,9 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
	return dma_ops;
}

#include <asm-generic/dma-coherent.h>
#include <asm-generic/dma-mapping-common.h>

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
@@ -33,12 +36,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
	return 0;
}

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction dir);

@@ -65,7 +62,42 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
	return dma_addr == 0;
}

#include <asm-generic/dma-coherent.h>
#include <asm-generic/dma-mapping-common.h>
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;
	if (!ops->alloc_coherent)
		return NULL;

	memory = ops->alloc_coherent(dev, size, dma_handle, gfp);
	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

	return memory;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, dma_handle);
}

/* arch/sh/mm/consistent.c */
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);
extern void dma_generic_free_coherent(struct device *dev, size_t size,
				      void *vaddr, dma_addr_t dma_handle);

#endif /* __ASM_SH_DMA_MAPPING_H */
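
Driver-facing usage does not change with this move: callers still use dma_alloc_coherent()/dma_free_coherent(), which now dispatch through the inline wrappers above. A minimal, hypothetical caller for illustration (example_dev_setup is made up):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/*
 * Hypothetical caller: dma_alloc_coherent() first tries any per-device
 * coherent pool (dma_alloc_from_coherent) and then falls through to
 * get_dma_ops(dev)->alloc_coherent.
 */
static int example_dev_setup(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... program the device with 'handle', access 'buf' from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
	return 0;
}
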
arch/sh/kernel/dma-nommu.c +2 −0
@@ -61,6 +61,8 @@ static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
}

struct dma_map_ops nommu_dma_ops = {
	.alloc_coherent		= dma_generic_alloc_coherent,
	.free_coherent		= dma_generic_free_coherent,
	.map_page		= nommu_map_page,
	.map_sg			= nommu_map_sg,
	.sync_single_for_device	= nommu_sync_single,
arch/sh/mm/consistent.c +5 −17
@@ -33,15 +33,12 @@ static int __init dma_init(void)
}
fs_initcall(dma_init);

void *dma_alloc_coherent(struct device *dev, size_t size,
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret, *ret_nocache;
	int order = get_order(size);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;
@@ -63,30 +60,21 @@ void *dma_alloc_coherent(struct device *dev, size_t size,

	*dma_handle = virt_to_phys(ret);

	debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache);

	return ret_nocache;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size,
void dma_generic_free_coherent(struct device *dev, size_t size,
			       void *vaddr, dma_addr_t dma_handle)
{
	int order = get_order(size);
	unsigned long pfn = dma_handle >> PAGE_SHIFT;
	int k;

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
	for (k = 0; k < (1 << order); k++)
		__free_pages(pfn_to_page(pfn + k), 0);

	iounmap(vaddr);
}
EXPORT_SYMBOL(dma_free_coherent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)