Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 83a9f86e authored by Liam Mark's avatar Liam Mark Committed by Patrick Daly
Browse files

iommu: dma-mapping-fast: per-buffer coherent mappings



For "fast" stage 1 mappings, add support to either force a buffer to
be mapped as coherent through the DMA_ATTR_FORCE_COHERENT DMA
attribute, or to force a buffer not to be mapped as coherent by
using the DMA_ATTR_FORCE_NON_COHERENT DMA attribute.

Both the DMA_ATTR_FORCE_COHERENT and DMA_ATTR_FORCE_NON_COHERENT DMA
attributes override the buffer coherency configuration set by making
the device coherent.

Change-Id: I465a88ee568fb6cd8206c26a62cbf02ac40328fa
Signed-off-by: default avatarLiam Mark <lmark@codeaurora.org>
parent b36b6c1b
Loading
Loading
Loading
Loading
+43 −6
Original line number Diff line number Diff line
@@ -25,6 +25,13 @@
#define FAST_PAGE_SIZE (1UL << FAST_PAGE_SHIFT)
#define FAST_PAGE_MASK (~(PAGE_SIZE - 1))
#define FAST_PTE_ADDR_MASK		((av8l_fast_iopte)0xfffffffff000)
#define FAST_MAIR_ATTR_IDX_CACHE	1
#define FAST_PTE_ATTRINDX_SHIFT		2
#define FAST_PTE_ATTRINDX_MASK		0x7
#define FAST_PTE_SH_SHIFT		8
#define FAST_PTE_SH_MASK	   (((av8l_fast_iopte)0x3) << FAST_PTE_SH_SHIFT)
#define FAST_PTE_SH_OS             (((av8l_fast_iopte)2) << FAST_PTE_SH_SHIFT)
#define FAST_PTE_SH_IS             (((av8l_fast_iopte)3) << FAST_PTE_SH_SHIFT)

static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
@@ -56,6 +63,36 @@ static void fast_dmac_clean_range(struct dma_fast_smmu_mapping *mapping,
		dmac_clean_range(start, end);
}

/*
 * __fast_is_pte_coherent - decide coherency from the live page-table entry
 * @ptep: pointer to the installed av8l "fast" iopte
 *
 * Returns true when the PTE is mapped with the cacheable MAIR index and a
 * shareable (inner- or outer-shareable) SH encoding, i.e. the mapping was
 * created as coherent; false otherwise.
 */
static bool __fast_is_pte_coherent(av8l_fast_iopte *ptep)
{
	av8l_fast_iopte pte = *ptep;
	av8l_fast_iopte sh = pte & FAST_PTE_SH_MASK;
	int attr_idx = (pte >> FAST_PTE_ATTRINDX_SHIFT) &
			FAST_PTE_ATTRINDX_MASK;

	/* Non-cacheable attribute index can never be coherent. */
	if (attr_idx != FAST_MAIR_ATTR_IDX_CACHE)
		return false;

	/* Cacheable + shareable (IS or OS) => coherent mapping. */
	return sh == FAST_PTE_SH_IS || sh == FAST_PTE_SH_OS;
}

/*
 * is_dma_coherent - resolve effective coherency for a mapping request
 * @dev:   device the buffer is being mapped for
 * @attrs: DMA attributes supplied by the caller
 *
 * DMA_ATTR_FORCE_COHERENT and DMA_ATTR_FORCE_NON_COHERENT override the
 * device-level coherency configuration; absent either attribute, fall
 * back to the device's own setting.
 */
static bool is_dma_coherent(struct device *dev, unsigned long attrs)
{
	if (attrs & DMA_ATTR_FORCE_COHERENT)
		return true;

	if (attrs & DMA_ATTR_FORCE_NON_COHERENT)
		return false;

	return is_device_dma_coherent(dev);
}

/*
 * Checks if the allocated range (ending at @end) covered the upcoming
 * stale bit.  We don't need to know exactly where the range starts since
@@ -313,7 +350,7 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page,
	int nptes = len >> FAST_PAGE_SHIFT;
	bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
	int prot = __fast_dma_direction_to_prot(dir);
	bool is_coherent = is_device_dma_coherent(dev);
	bool is_coherent = is_dma_coherent(dev, attrs);

	prot = __get_iommu_pgprot(attrs, prot, is_coherent);

@@ -357,7 +394,7 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
	int nptes = len >> FAST_PAGE_SHIFT;
	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
	bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
	bool is_coherent = is_device_dma_coherent(dev);
	bool is_coherent = is_dma_coherent(dev, attrs);

	if (!skip_sync && !is_coherent)
		__fast_dma_page_dev_to_cpu(page, offset, size, dir);
@@ -377,7 +414,7 @@ static void fast_smmu_sync_single_for_cpu(struct device *dev,
	unsigned long offset = iova & ~FAST_PAGE_MASK;
	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));

	if (!is_device_dma_coherent(dev))
	if (!__fast_is_pte_coherent(pmd))
		__fast_dma_page_dev_to_cpu(page, offset, size, dir);
}

@@ -389,7 +426,7 @@ static void fast_smmu_sync_single_for_device(struct device *dev,
	unsigned long offset = iova & ~FAST_PAGE_MASK;
	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));

	if (!is_device_dma_coherent(dev))
	if (!__fast_is_pte_coherent(pmd))
		__fast_dma_page_cpu_to_dev(page, offset, size, dir);
}

@@ -469,7 +506,7 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
	struct sg_mapping_iter miter;
	unsigned int count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
	int prot = IOMMU_READ | IOMMU_WRITE; /* TODO: extract from attrs */
	bool is_coherent = is_device_dma_coherent(dev);
	bool is_coherent = is_dma_coherent(dev, attrs);
	pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
	struct page **pages;

@@ -591,7 +628,7 @@ static int fast_smmu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
	unsigned long uaddr = vma->vm_start;
	struct page **pages;
	int i, nr_pages, ret = 0;
	bool coherent = is_device_dma_coherent(dev);
	bool coherent = is_dma_coherent(dev, attrs);

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     coherent);