
Commit a4a4330d authored by Christoph Hellwig

swiotlb: add support for non-coherent DMA



Handle architectures that are not cache coherent directly in the main
swiotlb code by calling arch_sync_dma_for_{device,cpu} in all the right
places from the various dma_map/unmap/sync methods when the device is
non-coherent.
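For readers unfamiliar with these hooks: on a cache-coherent system they are
no-ops, while a non-coherent architecture implements them as CPU cache
maintenance. Below is a minimal sketch of what such an implementation
typically looks like (modeled loosely on arch code such as arch/arc/mm/dma.c;
the dma_cache_wback/dma_cache_inv/dma_cache_wback_inv names are stand-ins for
whatever cache-maintenance primitives the architecture actually provides, and
none of this is part of the patch itself):

#include <linux/dma-noncoherent.h>

/*
 * Sketch only, not part of this patch: make the CPU caches consistent
 * with what the device is about to see.  The dma_cache_* calls are
 * placeholder names for the architecture's real cache ops.
 */
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);		/* write dirty lines to RAM */
		break;
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);		/* drop soon-to-be-stale lines */
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);	/* both of the above */
		break;
	default:
		break;
	}
}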

Because swiotlb now uses dma_direct_alloc for coherent allocations, that
side is already taken care of by the dma-direct code, which calls into
arch_dma_{alloc,free} for non-coherent devices.
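For reference, the dma-direct dispatch referred to here looks roughly like
the following (a simplified sketch of kernel/dma/direct.c from the same
series, not part of this diff):

/*
 * Sketch of the allocation-side dispatch mentioned above: devices that
 * are not cache coherent are diverted to the architecture's own
 * allocator, which can return memory through an uncached mapping, so
 * swiotlb itself never has to deal with them here.
 */
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev))
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
	else
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}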

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent fafadcd1
kernel/dma/swiotlb.c: +23 −10
@@ -21,6 +21,7 @@
 
 #include <linux/cache.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/mm.h>
 #include <linux/export.h>
 #include <linux/spinlock.h>
@@ -671,11 +672,17 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
-		return dev_addr;
-
-	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
-	return swiotlb_bounce_page(dev, &phys, size, dir, attrs);
+	if (!dma_capable(dev, dev_addr, size) ||
+	    swiotlb_force == SWIOTLB_FORCE) {
+		trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+		dev_addr = swiotlb_bounce_page(dev, &phys, size, dir, attrs);
+	}
+
+	if (!dev_is_dma_coherent(dev) &&
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		arch_sync_dma_for_device(dev, phys, size, dir);
+
+	return dev_addr;
 }
 
 /*
@@ -694,6 +701,10 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 
 	BUG_ON(dir == DMA_NONE);
 
+	if (!dev_is_dma_coherent(hwdev) &&
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
+
 	if (is_swiotlb_buffer(paddr)) {
 		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 		return;
@@ -730,14 +741,16 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (is_swiotlb_buffer(paddr)) {
+	if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_CPU)
+		arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
+
+	if (is_swiotlb_buffer(paddr))
 		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
-		return;
-	}
 
-	if (dir != DMA_FROM_DEVICE)
-		return;
+	if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_DEVICE)
+		arch_sync_dma_for_device(hwdev, paddr, size, dir);
 
-	dma_mark_clean(phys_to_virt(paddr), size);
+	if (!is_swiotlb_buffer(paddr) && dir == DMA_FROM_DEVICE)
+		dma_mark_clean(phys_to_virt(paddr), size);
 }
 