Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e7146988 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server
Browse files

Merge "iommu: io-pgtable-fast: Separate dma and io-pagetable layers"

parents b32860ae 913c3860
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -3152,7 +3152,7 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
			ret = -ENODEV;
			break;
		}
		info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
		info->ops = smmu_domain->pgtbl_ops;
		ret = 0;
		break;
	}
+44 −75
Original line number Diff line number Diff line
@@ -14,6 +14,7 @@
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <trace/events/iommu.h>
#include "io-pgtable.h"

#include <soc/qcom/secure_buffer.h>
#include <linux/arm-smmu-errata.h>
@@ -22,14 +23,6 @@
#define FAST_PAGE_SHIFT		12
#define FAST_PAGE_SIZE (1UL << FAST_PAGE_SHIFT)
#define FAST_PAGE_MASK (~(PAGE_SIZE - 1))
#define FAST_PTE_ADDR_MASK		((av8l_fast_iopte)0xfffffffff000)
#define FAST_MAIR_ATTR_IDX_CACHE	1
#define FAST_PTE_ATTRINDX_SHIFT		2
#define FAST_PTE_ATTRINDX_MASK		0x7
#define FAST_PTE_SH_SHIFT		8
#define FAST_PTE_SH_MASK	   (((av8l_fast_iopte)0x3) << FAST_PTE_SH_SHIFT)
#define FAST_PTE_SH_OS             (((av8l_fast_iopte)2) << FAST_PTE_SH_SHIFT)
#define FAST_PTE_SH_IS             (((av8l_fast_iopte)3) << FAST_PTE_SH_SHIFT)

static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
@@ -54,27 +47,6 @@ static int __get_iommu_pgprot(unsigned long attrs, int prot,
	return prot;
}

static void fast_dmac_clean_range(struct dma_fast_smmu_mapping *mapping,
				  void *start, void *end)
{
	if (!mapping->is_smmu_pt_coherent)
		dmac_clean_range(start, end);
}

static bool __fast_is_pte_coherent(av8l_fast_iopte *ptep)
{
	int attr_idx = (*ptep & (FAST_PTE_ATTRINDX_MASK <<
			FAST_PTE_ATTRINDX_SHIFT)) >>
			FAST_PTE_ATTRINDX_SHIFT;

	if ((attr_idx == FAST_MAIR_ATTR_IDX_CACHE) &&
		(((*ptep & FAST_PTE_SH_MASK) == FAST_PTE_SH_IS) ||
		  (*ptep & FAST_PTE_SH_MASK) == FAST_PTE_SH_OS))
		return true;

	return false;
}

static bool is_dma_coherent(struct device *dev, unsigned long attrs)
{
	bool is_coherent;
@@ -195,7 +167,7 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,

		iommu_tlbiall(mapping->domain);
		mapping->have_stale_tlbs = false;
		av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, skip_sync);
		av8l_fast_clear_stale_ptes(mapping->pgtbl_ops, skip_sync);
	}

	iova =  (bit << FAST_PAGE_SHIFT) + mapping->base;
@@ -368,12 +340,10 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page,
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	dma_addr_t iova;
	unsigned long flags;
	av8l_fast_iopte *pmd;
	phys_addr_t phys_plus_off = page_to_phys(page) + offset;
	phys_addr_t phys_to_map = round_down(phys_plus_off, FAST_PAGE_SIZE);
	unsigned long offset_from_phys_to_map = phys_plus_off & ~FAST_PAGE_MASK;
	size_t len = ALIGN(size + offset_from_phys_to_map, FAST_PAGE_SIZE);
	int nptes = len >> FAST_PAGE_SHIFT;
	bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
	int prot = __fast_dma_direction_to_prot(dir);
	bool is_coherent = is_dma_coherent(dev, attrs);
@@ -391,13 +361,10 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page,
	if (unlikely(iova == DMA_ERROR_CODE))
		goto fail;

	pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);

	if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot)))
	if (unlikely(av8l_fast_map_public(mapping->pgtbl_ops, iova,
					  phys_to_map, len, prot)))
		goto fail_free_iova;

	fast_dmac_clean_range(mapping, pmd, pmd + nptes);

	spin_unlock_irqrestore(&mapping->lock, flags);

	trace_map(mapping->domain, iova, phys_to_map, len, prot);
@@ -416,20 +383,23 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	unsigned long flags;
	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
	unsigned long offset = iova & ~FAST_PAGE_MASK;
	size_t len = ALIGN(size + offset, FAST_PAGE_SIZE);
	int nptes = len >> FAST_PAGE_SHIFT;
	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
	bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
	bool is_coherent = is_dma_coherent(dev, attrs);

	if (!skip_sync && !is_coherent)
		__fast_dma_page_dev_to_cpu(page, offset, size, dir);
	if (!skip_sync && !is_coherent) {
		phys_addr_t phys;

		phys = av8l_fast_iova_to_phys_public(mapping->pgtbl_ops, iova);
		WARN_ON(!phys);

		__fast_dma_page_dev_to_cpu(phys_to_page(phys), offset,
						size, dir);
	}

	spin_lock_irqsave(&mapping->lock, flags);
	av8l_fast_unmap_public(pmd, len);
	fast_dmac_clean_range(mapping, pmd, pmd + nptes);
	av8l_fast_unmap_public(mapping->pgtbl_ops, iova, len);
	__fast_smmu_free_iova(mapping, iova - offset, len);
	spin_unlock_irqrestore(&mapping->lock, flags);

@@ -440,24 +410,34 @@ static void fast_smmu_sync_single_for_cpu(struct device *dev,
		dma_addr_t iova, size_t size, enum dma_data_direction dir)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
	unsigned long offset = iova & ~FAST_PAGE_MASK;
	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));

	if (!__fast_is_pte_coherent(pmd))
		__fast_dma_page_dev_to_cpu(page, offset, size, dir);
	if (!av8l_fast_iova_coherent_public(mapping->pgtbl_ops, iova)) {
		phys_addr_t phys;

		phys = av8l_fast_iova_to_phys_public(mapping->pgtbl_ops, iova);
		WARN_ON(!phys);

		__fast_dma_page_dev_to_cpu(phys_to_page(phys), offset,
						size, dir);
	}
}

static void fast_smmu_sync_single_for_device(struct device *dev,
		dma_addr_t iova, size_t size, enum dma_data_direction dir)
{
	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
	unsigned long offset = iova & ~FAST_PAGE_MASK;
	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));

	if (!__fast_is_pte_coherent(pmd))
		__fast_dma_page_cpu_to_dev(page, offset, size, dir);
	if (!av8l_fast_iova_coherent_public(mapping->pgtbl_ops, iova)) {
		phys_addr_t phys;

		phys = av8l_fast_iova_to_phys_public(mapping->pgtbl_ops, iova);
		WARN_ON(!phys);

		__fast_dma_page_cpu_to_dev(phys_to_page(phys), offset,
						size, dir);
	}
}

static int fast_smmu_map_sg(struct device *dev, struct scatterlist *sg,
@@ -532,7 +512,6 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
	struct sg_table sgt;
	dma_addr_t dma_addr, iova_iter;
	void *addr;
	av8l_fast_iopte *ptep;
	unsigned long flags;
	struct sg_mapping_iter miter;
	unsigned int count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
@@ -580,17 +559,14 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
	sg_miter_start(&miter, sgt.sgl, sgt.orig_nents,
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	while (sg_miter_next(&miter)) {
		int nptes = miter.length >> FAST_PAGE_SHIFT;

		ptep = iopte_pmd_offset(mapping->pgtbl_pmds, iova_iter);
		if (unlikely(av8l_fast_map_public(
				     ptep, page_to_phys(miter.page),
				     mapping->pgtbl_ops, iova_iter,
				     page_to_phys(miter.page),
				     miter.length, prot))) {
			dev_err(dev, "no map public\n");
			/* TODO: unwind previously successful mappings */
			goto out_free_iova;
		}
		fast_dmac_clean_range(mapping, ptep, ptep + nptes);
		iova_iter += miter.length;
	}
	sg_miter_stop(&miter);
@@ -610,9 +586,7 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
out_unmap:
	/* need to take the lock again for page tables and iova */
	spin_lock_irqsave(&mapping->lock, flags);
	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_addr);
	av8l_fast_unmap_public(ptep, size);
	fast_dmac_clean_range(mapping, ptep, ptep + count);
	av8l_fast_unmap_public(mapping->pgtbl_ops, dma_addr, size);
out_free_iova:
	__fast_smmu_free_iova(mapping, dma_addr, size);
	spin_unlock_irqrestore(&mapping->lock, flags);
@@ -631,7 +605,6 @@ static void fast_smmu_free(struct device *dev, size_t size,
	struct vm_struct *area;
	struct page **pages;
	size_t count = ALIGN(size, SZ_4K) >> FAST_PAGE_SHIFT;
	av8l_fast_iopte *ptep;
	unsigned long flags;

	size = ALIGN(size, SZ_4K);
@@ -642,10 +615,8 @@ static void fast_smmu_free(struct device *dev, size_t size,

	pages = area->pages;
	dma_common_free_remap(vaddr, size, VM_USERMAP, false);
	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_handle);
	spin_lock_irqsave(&mapping->lock, flags);
	av8l_fast_unmap_public(ptep, size);
	fast_dmac_clean_range(mapping, ptep, ptep + count);
	av8l_fast_unmap_public(mapping->pgtbl_ops, dma_handle, size);
	__fast_smmu_free_iova(mapping, dma_handle, size);
	spin_unlock_irqrestore(&mapping->lock, flags);
	__fast_smmu_free_pages(pages, count);
@@ -751,16 +722,20 @@ static int fast_smmu_mapping_error(struct device *dev,
static void __fast_smmu_mapped_over_stale(struct dma_fast_smmu_mapping *fast,
					  void *data)
{
	av8l_fast_iopte *ptep = data;
	av8l_fast_iopte *pmds, *ptep = data;
	dma_addr_t iova;
	unsigned long bitmap_idx;
	struct io_pgtable *tbl;

	bitmap_idx = (unsigned long)(ptep - fast->pgtbl_pmds);
	tbl  = container_of(fast->pgtbl_ops, struct io_pgtable, ops);
	pmds = tbl->cfg.av8l_fast_cfg.pmds;

	bitmap_idx = (unsigned long)(ptep - pmds);
	iova = bitmap_idx << FAST_PAGE_SHIFT;
	dev_err(fast->dev, "Mapped over stale tlb at %pa\n", &iova);
	dev_err(fast->dev, "bitmap (failure at idx %lu):\n", bitmap_idx);
	dev_err(fast->dev, "ptep: %p pmds: %p diff: %lu\n", ptep,
		fast->pgtbl_pmds, bitmap_idx);
		pmds, bitmap_idx);
	print_hex_dump(KERN_ERR, "bmap: ", DUMP_PREFIX_ADDRESS,
		       32, 8, fast->bitmap, fast->bitmap_size, false);
}
@@ -947,13 +922,7 @@ int fast_smmu_init_mapping(struct device *dev,
		err = -EINVAL;
		goto release_mapping;
	}
	mapping->fast->pgtbl_pmds = info.pmds;

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
				  &mapping->fast->is_smmu_pt_coherent)) {
		err = -EINVAL;
		goto release_mapping;
	}
	mapping->fast->pgtbl_ops = (struct io_pgtable_ops *)info.ops;

	mapping->fast->notifier.notifier_call = fast_smmu_notify;
	av8l_register_notify(&mapping->fast->notifier);
+85 −35
Original line number Diff line number Diff line
@@ -51,6 +51,7 @@ struct av8l_fast_io_pgtable {
#define AV8L_FAST_PTE_SH_NS		(((av8l_fast_iopte)0) << 8)
#define AV8L_FAST_PTE_SH_OS		(((av8l_fast_iopte)2) << 8)
#define AV8L_FAST_PTE_SH_IS		(((av8l_fast_iopte)3) << 8)
#define AV8L_FAST_PTE_SH_MASK		(((av8l_fast_iopte)3) << 8)
#define AV8L_FAST_PTE_NS		(((av8l_fast_iopte)1) << 5)
#define AV8L_FAST_PTE_VALID		(((av8l_fast_iopte)1) << 0)

@@ -68,6 +69,7 @@ struct av8l_fast_io_pgtable {
#define AV8L_FAST_PTE_AP_PRIV_RO	(((av8l_fast_iopte)2) << 6)
#define AV8L_FAST_PTE_AP_RO		(((av8l_fast_iopte)3) << 6)
#define AV8L_FAST_PTE_ATTRINDX_SHIFT	2
#define AV8L_FAST_PTE_ATTRINDX_MASK	0x7
#define AV8L_FAST_PTE_nG		(((av8l_fast_iopte)1) << 11)

/* Stage-2 PTE */
@@ -135,6 +137,13 @@ struct av8l_fast_io_pgtable {

#define AV8L_FAST_PAGE_SHIFT		12

/*
 * Extract the MAIR attribute-index field (AttrIndx, bits [4:2]) from a
 * stage-1 PTE.  Must use bitwise '&' here: a logical '&&' would collapse
 * the index to 0/1 and make every nonzero AttrIndx look like the cached
 * index, breaking av8l_fast_iova_coherent().
 */
#define PTE_MAIR_IDX(pte)				\
	(((pte) >> AV8L_FAST_PTE_ATTRINDX_SHIFT) &	\
	 AV8L_FAST_PTE_ATTRINDX_MASK)

/* Isolate the shareability field (SH, bits [9:8]) of a PTE. */
#define PTE_SH_IDX(pte) ((pte) & AV8L_FAST_PTE_SH_MASK)

#define iopte_pmd_offset(pmds, iova) (pmds + (iova >> 12))

#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB

@@ -163,10 +172,11 @@ static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep)
	}
}

void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, bool skip_sync)
void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, bool skip_sync)
{
	int i;
	av8l_fast_iopte *pmdp = pmds;
	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
	av8l_fast_iopte *pmdp = data->pmds;

	for (i = 0; i < ((SZ_1G * 4UL) >> AV8L_FAST_PAGE_SHIFT); ++i) {
		if (!(*pmdp & AV8L_FAST_PTE_VALID)) {
@@ -183,11 +193,18 @@ static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep)
}
#endif

/* caller must take care of cache maintenance on *ptep */
int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size,
			 int prot)
/*
 * Clean the CPU data cache for a range of PTEs so the SMMU walker sees
 * the update.  No-op when IO_PGTABLE_QUIRK_NO_DMA is set in the
 * page-table config (no cache maintenance required in that case).
 */
static void av8l_clean_range(struct io_pgtable_ops *ops,
			av8l_fast_iopte *start, av8l_fast_iopte *end)
{
	struct io_pgtable *pgtbl = iof_pgtable_ops_to_pgtable(ops);

	if (pgtbl->cfg.quirks & IO_PGTABLE_QUIRK_NO_DMA)
		return;

	dmac_clean_range(start, end);
}

static av8l_fast_iopte
av8l_fast_prot_to_pte(struct av8l_fast_io_pgtable *data, int prot)
{
	int i, nptes = size >> AV8L_FAST_PAGE_SHIFT;
	av8l_fast_iopte pte = AV8L_FAST_PTE_XN
		| AV8L_FAST_PTE_TYPE_PAGE
		| AV8L_FAST_PTE_AF
@@ -209,58 +226,67 @@ int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size,
	else
		pte |= AV8L_FAST_PTE_AP_RW;

	return pte;
}

static int av8l_fast_map(struct io_pgtable_ops *ops, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
	av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova);
	unsigned long i, nptes = size >> AV8L_FAST_PAGE_SHIFT;
	av8l_fast_iopte pte;

	pte = av8l_fast_prot_to_pte(data, prot);
	paddr &= AV8L_FAST_PTE_ADDR_MASK;
	for (i = 0; i < nptes; i++, paddr += SZ_4K) {
		__av8l_check_for_stale_tlb(ptep + i);
		*(ptep + i) = pte | paddr;
	}
	av8l_clean_range(ops, ptep, ptep + nptes);

	return 0;
}

static int av8l_fast_map(struct io_pgtable_ops *ops, unsigned long iova,
int av8l_fast_map_public(struct io_pgtable_ops *ops, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
	av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova);
	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;

	av8l_fast_map_public(ptep, paddr, size, prot);
	dmac_clean_range(ptep, ptep + nptes);

	return 0;
	return av8l_fast_map(ops, iova, paddr, size, prot);
}

static void __av8l_fast_unmap(av8l_fast_iopte *ptep, size_t size,
			      bool need_stale_tlb_tracking)
static size_t
__av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			size_t size, bool allow_stale_tlb)
{
	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;
	int val = need_stale_tlb_tracking
	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
	unsigned long nptes;
	av8l_fast_iopte *ptep;
	int val = allow_stale_tlb
		? AV8L_FAST_PTE_UNMAPPED_NEED_TLBI
		: 0;

	ptep = iopte_pmd_offset(data->pmds, iova);
	nptes = size >> AV8L_FAST_PAGE_SHIFT;

	memset(ptep, val, sizeof(*ptep) * nptes);
	av8l_clean_range(ops, ptep, ptep + nptes);
	if (!allow_stale_tlb)
		io_pgtable_tlb_flush_all(&data->iop);

	return size;
}

/* caller must take care of cache maintenance on *ptep */
void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size)
/* caller must take care of tlb cache maintenance */
void av8l_fast_unmap_public(struct io_pgtable_ops *ops, unsigned long iova,
				size_t size)
{
	__av8l_fast_unmap(ptep, size, true);
	__av8l_fast_unmap(ops, iova, size, true);
}

static size_t av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			      size_t size)
{
	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova);
	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;

	__av8l_fast_unmap(ptep, size, false);
	dmac_clean_range(ptep, ptep + nptes);
	io_pgtable_tlb_flush_all(iop);

	return size;
	return __av8l_fast_unmap(ops, iova, size, false);
}

#if defined(CONFIG_ARM64)
@@ -305,6 +331,12 @@ static phys_addr_t av8l_fast_iova_to_phys(struct io_pgtable_ops *ops,
	return phys | (iova & 0xfff);
}

/*
 * Exported entry point (used by dma-mapping-fast): translate an IOVA to
 * the physical address recorded in the fast page tables.
 */
phys_addr_t av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops,
					  unsigned long iova)
{
	phys_addr_t phys = av8l_fast_iova_to_phys(ops, iova);

	return phys;
}

static int av8l_fast_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, size_t *size)
@@ -312,6 +344,23 @@ static int av8l_fast_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
	return -ENODEV;
}

/*
 * Report whether the PTE backing @iova is mapped IO-coherently: it must
 * carry the cached MAIR index and be inner- or outer-shareable.
 */
static bool av8l_fast_iova_coherent(struct io_pgtable_ops *ops,
					unsigned long iova)
{
	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
	av8l_fast_iopte pte = *iopte_pmd_offset(data->pmds, iova);
	bool cacheable = PTE_MAIR_IDX(pte) == AV8L_FAST_MAIR_ATTR_IDX_CACHE;
	bool shareable = (PTE_SH_IDX(pte) == AV8L_FAST_PTE_SH_OS) ||
			 (PTE_SH_IDX(pte) == AV8L_FAST_PTE_SH_IS);

	return cacheable && shareable;
}

/* Exported wrapper so dma-mapping-fast can query PTE coherency. */
bool av8l_fast_iova_coherent_public(struct io_pgtable_ops *ops,
					unsigned long iova)
{
	bool coherent = av8l_fast_iova_coherent(ops, iova);

	return coherent;
}

static struct av8l_fast_io_pgtable *
av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
{
@@ -326,6 +375,7 @@ av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
		.map_sg		= av8l_fast_map_sg,
		.unmap		= av8l_fast_unmap,
		.iova_to_phys	= av8l_fast_iova_to_phys,
		.is_iova_coherent = av8l_fast_iova_coherent,
	};

	return data;
@@ -634,7 +684,7 @@ static int __init av8l_fast_positive_testing(void)
	}

	/* sweep up TLB proving PTEs */
	av8l_fast_clear_stale_ptes(pmds, false);
	av8l_fast_clear_stale_ptes(ops, false);

	/* map the entire 4GB VA space with 8K map calls */
	for (iova = 0; iova < max; iova += SZ_8K) {
@@ -655,7 +705,7 @@ static int __init av8l_fast_positive_testing(void)
	}

	/* sweep up TLB proving PTEs */
	av8l_fast_clear_stale_ptes(pmds, false);
	av8l_fast_clear_stale_ptes(ops, false);

	/* map the entire 4GB VA space with 16K map calls */
	for (iova = 0; iova < max; iova += SZ_16K) {
@@ -676,7 +726,7 @@ static int __init av8l_fast_positive_testing(void)
	}

	/* sweep up TLB proving PTEs */
	av8l_fast_clear_stale_ptes(pmds, false);
	av8l_fast_clear_stale_ptes(ops, false);

	/* map the entire 4GB VA space with 64K map calls */
	for (iova = 0; iova < max; iova += SZ_64K) {
+2 −3
Original line number Diff line number Diff line
@@ -10,6 +10,7 @@
#include <linux/io-pgtable-fast.h>

struct dma_iommu_mapping;
struct io_pgtable_ops;

struct dma_fast_smmu_mapping {
	struct device		*dev;
@@ -28,12 +29,10 @@ struct dma_fast_smmu_mapping {
	bool		have_stale_tlbs;

	dma_addr_t	pgtbl_dma_handle;
	av8l_fast_iopte	*pgtbl_pmds;
	struct io_pgtable_ops *pgtbl_ops;

	spinlock_t	lock;
	struct notifier_block notifier;

	int		is_smmu_pt_coherent;
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
+45 −6
Original line number Diff line number Diff line
@@ -8,13 +8,52 @@

#include <linux/notifier.h>

/*
 * This ought to be private to io-pgtable-fast, but dma-mapping-fast
 * currently requires it for a debug usecase.
 */
typedef u64 av8l_fast_iopte;

#define iopte_pmd_offset(pmds, iova) (pmds + (iova >> 12))
struct io_pgtable_ops;

#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST

int av8l_fast_map_public(struct io_pgtable_ops *ops, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot);

void av8l_fast_unmap_public(struct io_pgtable_ops *ops, unsigned long iova,
				size_t size);

bool av8l_fast_iova_coherent_public(struct io_pgtable_ops *ops,
					unsigned long iova);

phys_addr_t av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops,
					  unsigned long iova);
#else
static inline int
av8l_fast_map_public(struct io_pgtable_ops *ops, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	return -EINVAL;
}
static inline void av8l_fast_unmap_public(struct io_pgtable_ops *ops,
					  unsigned long iova, size_t size)
{
}

static inline bool av8l_fast_iova_coherent_public(struct io_pgtable_ops *ops,
						  unsigned long iova)
{
	return false;
}
static inline phys_addr_t
av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops,
				  unsigned long iova)
{
	return 0;
}
#endif /* CONFIG_IOMMU_IO_PGTABLE_FAST */

int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size,
			 int prot);
void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size);

/* events for notifiers passed to av8l_register_notify */
#define MAPPED_OVER_STALE_TLB 1
@@ -29,14 +68,14 @@ void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size);
 */
#define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0xa

void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds, bool skip_sync);
void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, bool skip_sync);
void av8l_register_notify(struct notifier_block *nb);

#else  /* !CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */

#define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0

static inline void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds,
static inline void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops,
					      bool skip_sync)
{
}
Loading