Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6fa1d28e authored by Christoph Hellwig
Browse files

sh: use generic dma_noncoherent_ops



Switch to the generic noncoherent direct mapping implementation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Yoshinori Sato <ysato@users.sourceforge.jp>
parent 46bcde94
Loading
Loading
Loading
Loading
+2 −1
Original line number Original line Diff line number Diff line
@@ -51,7 +51,6 @@ config SUPERH
	select HAVE_ARCH_AUDITSYSCALL
	select HAVE_ARCH_AUDITSYSCALL
	select HAVE_FUTEX_CMPXCHG if FUTEX
	select HAVE_FUTEX_CMPXCHG if FUTEX
	select HAVE_NMI
	select HAVE_NMI
	select NEED_DMA_MAP_STATE
	select NEED_SG_DMA_LENGTH
	select NEED_SG_DMA_LENGTH


	help
	help
@@ -164,6 +163,8 @@ config DMA_COHERENT


config DMA_NONCOHERENT
config DMA_NONCOHERENT
	def_bool !DMA_COHERENT
	def_bool !DMA_COHERENT
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
	select DMA_NONCOHERENT_OPS


config PGTABLE_LEVELS
config PGTABLE_LEVELS
	default 3 if X2TLB
	default 3 if X2TLB
+1 −0
Original line number Original line Diff line number Diff line
@@ -2,6 +2,7 @@ generic-y += compat.h
generic-y += current.h
generic-y += current.h
generic-y += delay.h
generic-y += delay.h
generic-y += div64.h
generic-y += div64.h
generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += exec.h
generic-y += irq_regs.h
generic-y += irq_regs.h

arch/sh/include/asm/dma-mapping.h

deleted 100644 → 0
+0 −26
Original line number Original line Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

extern const struct dma_map_ops nommu_dma_ops;

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
#ifdef CONFIG_DMA_NONCOHERENT
	return &nommu_dma_ops;
#else
	return &dma_direct_ops;
#endif
}

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag,
					unsigned long attrs);
extern void dma_generic_free_coherent(struct device *dev, size_t size,
				      void *vaddr, dma_addr_t dma_handle,
				      unsigned long attrs);

void sh_sync_dma_for_device(void *vaddr, size_t size,
	    enum dma_data_direction dir);

#endif /* __ASM_SH_DMA_MAPPING_H */
+1 −1
Original line number Original line Diff line number Diff line
@@ -45,7 +45,7 @@ obj-$(CONFIG_DUMP_CODE) += disassemble.o
obj-$(CONFIG_HIBERNATION)	+= swsusp.o
obj-$(CONFIG_HIBERNATION)	+= swsusp.o
obj-$(CONFIG_DWARF_UNWINDER)	+= dwarf.o
obj-$(CONFIG_DWARF_UNWINDER)	+= dwarf.o
obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o perf_callchain.o
obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o perf_callchain.o
obj-$(CONFIG_DMA_NONCOHERENT)	+= dma-nommu.o dma-coherent.o
obj-$(CONFIG_DMA_NONCOHERENT)	+= dma-coherent.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT)		+= hw_breakpoint.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT)		+= hw_breakpoint.o


ccflags-y := -Werror
ccflags-y := -Werror
+11 −12
Original line number Original line Diff line number Diff line
@@ -7,14 +7,13 @@
 */
 */
#include <linux/mm.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <linux/module.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/addrspace.h>


void *dma_generic_alloc_coherent(struct device *dev, size_t size,
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
				 dma_addr_t *dma_handle, gfp_t gfp,
		gfp_t gfp, unsigned long attrs)
				 unsigned long attrs)
{
{
	void *ret, *ret_nocache;
	void *ret, *ret_nocache;
	int order = get_order(size);
	int order = get_order(size);
@@ -29,7 +28,8 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
	 * Pages from the page allocator may have data present in
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 * cache. So flush the cache before using uncached memory.
	 */
	 */
	sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL);
	arch_sync_dma_for_device(dev, virt_to_phys(ret), size,
			DMA_BIDIRECTIONAL);


	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
	if (!ret_nocache) {
@@ -46,9 +46,8 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
	return ret_nocache;
	return ret_nocache;
}
}


void dma_generic_free_coherent(struct device *dev, size_t size,
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
			       void *vaddr, dma_addr_t dma_handle,
		dma_addr_t dma_handle, unsigned long attrs)
			       unsigned long attrs)
{
{
	int order = get_order(size);
	int order = get_order(size);
	unsigned long pfn = (dma_handle >> PAGE_SHIFT);
	unsigned long pfn = (dma_handle >> PAGE_SHIFT);
@@ -63,12 +62,12 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
	iounmap(vaddr);
	iounmap(vaddr);
}
}


void sh_sync_dma_for_device(void *vaddr, size_t size,
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		    enum dma_data_direction direction)
		size_t size, enum dma_data_direction dir)
{
{
	void *addr = sh_cacheop_vaddr(vaddr);
	void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));


	switch (direction) {
	switch (dir) {
	case DMA_FROM_DEVICE:		/* invalidate only */
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(addr, size);
		__flush_invalidate_region(addr, size);
		break;
		break;
Loading