Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 356da6d0 authored by Christoph Hellwig
Browse files

dma-mapping: bypass indirect calls for dma-direct



Avoid expensive indirect calls in the fast path DMA mapping
operations by directly calling the dma_direct_* ops if we are using
the directly mapped DMA operations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Tony Luck <tony.luck@intel.com>
parent 190d4e59
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -7,7 +7,7 @@ extern const struct dma_map_ops alpha_pci_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
#ifdef CONFIG_ALPHA_JENSEN
	return &dma_direct_ops;
	return NULL;
#else
	return &alpha_pci_ops;
#endif
+1 −1
Original line number Diff line number Diff line
@@ -1280,7 +1280,7 @@ void __init arc_cache_init_master(void)
	/*
	 * In case of IOC (say IOC+SLC case), pointers above could still be set
	 * but end up not being relevant as the first function in chain is not
	 * called at all for @dma_direct_ops
	 * called at all for devices using coherent DMA.
	 *     arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
	 */
}
+1 −1
Original line number Diff line number Diff line
@@ -18,7 +18,7 @@ extern const struct dma_map_ops arm_coherent_dma_ops;

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_direct_ops;
	return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : NULL;
}

#ifdef __arch_page_to_dma
+3 −11
Original line number Diff line number Diff line
@@ -22,7 +22,7 @@
#include "dma.h"

/*
 *  dma_direct_ops is used if
 *  The generic direct mapping code is used if
 *   - MMU/MPU is off
 *   - cpu is v7m w/o cache support
 *   - device is coherent
@@ -209,16 +209,9 @@ const struct dma_map_ops arm_nommu_dma_ops = {
};
EXPORT_SYMBOL(arm_nommu_dma_ops);

/*
 * Pick the DMA mapping ops for a !MMU ARM device based on its coherency:
 * coherent devices use dma_direct_ops, non-coherent devices use the
 * arm_nommu_dma_ops defined above.
 * NOTE(review): this helper is the pre-image side of the diff — the commit
 * removes it in favour of setting ops only for the non-coherent case.
 */
static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
{
	return coherent ? &dma_direct_ops : &arm_nommu_dma_ops;
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	const struct dma_map_ops *dma_ops;

	if (IS_ENABLED(CONFIG_CPU_V7M)) {
		/*
		 * Cache support for v7m is optional, so can be treated as
@@ -234,7 +227,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
	}

	dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);

	set_dma_ops(dev, dma_ops);
	if (!dev->archdata.dma_coherent)
		set_dma_ops(dev, &arm_nommu_dma_ops);
}
+0 −3
Original line number Diff line number Diff line
@@ -462,9 +462,6 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (!dev->dma_ops)
		dev->dma_ops = &dma_direct_ops;

	dev->dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);

Loading