
Commit a06ec394 authored by Joerg Roedel

Merge branch 'iommu/page-sizes' into x86/amd

Conflicts:
	drivers/iommu/amd_iommu.c
parents 175d6146 6c274d1c
drivers/iommu/amd_iommu.c +24 −8
@@ -44,6 +44,24 @@
 
 #define LOOP_TIMEOUT	100000
 
+/*
+ * This bitmap is used to advertise the page sizes our hardware support
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define AMD_IOMMU_PGSIZES	(~0xFFFUL)
+
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
 /* A list of preallocated protection domains */
@@ -3093,9 +3111,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 }
 
 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
-			 phys_addr_t paddr, int gfp_order, int iommu_prot)
+			 phys_addr_t paddr, size_t page_size, int iommu_prot)
 {
-	unsigned long page_size = 0x1000UL << gfp_order;
 	struct protection_domain *domain = dom->priv;
 	int prot = 0;
 	int ret;
@@ -3115,24 +3132,22 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 	return ret;
 }
 
-static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
-			   int gfp_order)
+static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+			   size_t page_size)
 {
 	struct protection_domain *domain = dom->priv;
-	unsigned long page_size, unmap_size;
+	size_t unmap_size;
 
 	if (domain->mode == PAGE_MODE_NONE)
 		return -EINVAL;
 
-	page_size  = 0x1000UL << gfp_order;
-
 	mutex_lock(&domain->api_lock);
 	unmap_size = iommu_unmap_page(domain, iova, page_size);
 	mutex_unlock(&domain->api_lock);
 
 	domain_flush_tlb_pde(domain);
 
-	return get_order(unmap_size);
+	return unmap_size;
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -3182,6 +3197,7 @@ static struct iommu_ops amd_iommu_ops = {
 	.unmap = amd_iommu_unmap,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.domain_has_cap = amd_iommu_domain_has_cap,
+	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
 };
 
 /*****************************************************************************
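
For illustration, here is a small stand-alone sketch (hypothetical userspace C, not kernel code; EXAMPLE_PGSIZES is an invented name, and the GCC/Clang builtin __builtin_ctzl stands in for the kernel's __ffs) of how a pgsize_bitmap such as ~0xFFFUL is interpreted: the lowest set bit gives the smallest supported page size, and a size is usable exactly when its bit is advertised.

#include <stdio.h>

/* Illustrative bitmap: every power-of-two size >= 4 KiB, as with
 * AMD_IOMMU_PGSIZES above. */
#define EXAMPLE_PGSIZES	(~0xFFFUL)

int main(void)
{
	unsigned long bitmap = EXAMPLE_PGSIZES;

	/* Lowest set bit = smallest supported page size; this mirrors the
	 * core's "1 << __ffs(bitmap)" (__builtin_ctzl replaces __ffs here). */
	unsigned long min_pagesz = 1UL << __builtin_ctzl(bitmap);
	printf("min page size: 0x%lx\n", min_pagesz);	/* prints 0x1000 */

	/* A page size is acceptable iff its (single) bit is advertised. */
	unsigned long two_mib = 0x200000UL;
	printf("2 MiB supported: %s\n", (bitmap & two_mib) ? "yes" : "no");
	return 0;
}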
drivers/iommu/intel-iommu.c +23 −7
@@ -78,6 +78,24 @@
 #define LEVEL_STRIDE		(9)
 #define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
 
+/*
+ * This bitmap is used to advertise the page sizes our hardware support
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
+
 static inline int agaw_to_level(int agaw)
 {
 	return agaw + 2;
@@ -3979,12 +3997,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
 
 static int intel_iommu_map(struct iommu_domain *domain,
 			   unsigned long iova, phys_addr_t hpa,
-			   int gfp_order, int iommu_prot)
+			   size_t size, int iommu_prot)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	u64 max_addr;
 	int prot = 0;
-	size_t size;
 	int ret;
 
 	if (iommu_prot & IOMMU_READ)
@@ -3994,7 +4011,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
 		prot |= DMA_PTE_SNP;
 
-	size     = PAGE_SIZE << gfp_order;
 	max_addr = iova + size;
 	if (dmar_domain->max_addr < max_addr) {
 		u64 end;
@@ -4017,11 +4033,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	return ret;
 }
 
-static int intel_iommu_unmap(struct iommu_domain *domain,
-			     unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+			     unsigned long iova, size_t size)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
-	size_t size = PAGE_SIZE << gfp_order;
 	int order;
 
 	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4030,7 +4045,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
 
-	return order;
+	return PAGE_SIZE << order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -4069,6 +4084,7 @@ static struct iommu_ops intel_iommu_ops = {
 	.unmap		= intel_iommu_unmap,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 	.domain_has_cap = intel_iommu_domain_has_cap,
+	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
 };
 
 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
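
The unmap path now returns the number of unmapped bytes instead of a page order, so callers no longer have to convert through get_order()/PAGE_SIZE. A rough stand-alone illustration of the two conventions (hypothetical userspace C; get_order_demo is an invented stand-in for the kernel's get_order, and 4 KiB pages are assumed):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Rough userspace stand-in for the kernel's get_order(): smallest order
 * whose page count covers 'size'. */
static int get_order_demo(unsigned long size)
{
	int order = 0;
	unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	unsigned long unmapped = 0x200000UL;	/* say 2 MiB were unmapped */

	/* old convention: encode as an order, decode at the caller */
	int order = get_order_demo(unmapped);
	printf("old: order %d -> %lu bytes\n", order, PAGE_SIZE << order);

	/* new convention: the byte count travels directly */
	printf("new: %lu bytes\n", unmapped);
	return 0;
}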
drivers/iommu/iommu.c +107 −12
@@ -16,6 +16,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 
+#define pr_fmt(fmt)    "%s: " fmt, __func__
+
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/bug.h>
@@ -157,32 +159,125 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
-	      phys_addr_t paddr, int gfp_order, int prot)
+	      phys_addr_t paddr, size_t size, int prot)
 {
-	size_t size;
+	unsigned long orig_iova = iova;
+	unsigned int min_pagesz;
+	size_t orig_size = size;
+	int ret = 0;
 
 	if (unlikely(domain->ops->map == NULL))
 		return -ENODEV;
 
-	size         = PAGE_SIZE << gfp_order;
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+	/*
+	 * both the virtual address and the physical one, as well as
+	 * the size of the mapping, must be aligned (at least) to the
+	 * size of the smallest page supported by the hardware
+	 */
+	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
+			"0x%x\n", iova, (unsigned long)paddr,
+			(unsigned long)size, min_pagesz);
+		return -EINVAL;
+	}
+
+	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
+				(unsigned long)paddr, (unsigned long)size);
+
+	while (size) {
+		unsigned long pgsize, addr_merge = iova | paddr;
+		unsigned int pgsize_idx;
+
+		/* Max page size that still fits into 'size' */
+		pgsize_idx = __fls(size);
+
+		/* need to consider alignment requirements ? */
+		if (likely(addr_merge)) {
+			/* Max page size allowed by both iova and paddr */
+			unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+		}
+
+		/* build a mask of acceptable page sizes */
+		pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+		/* throw away page sizes not supported by the hardware */
+		pgsize &= domain->ops->pgsize_bitmap;
+
+		/* make sure we're still sane */
+		BUG_ON(!pgsize);
+
+		/* pick the biggest page */
+		pgsize_idx = __fls(pgsize);
+		pgsize = 1UL << pgsize_idx;
+
+		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
+					(unsigned long)paddr, pgsize);
 
-	BUG_ON(!IS_ALIGNED(iova | paddr, size));
+		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+		if (ret)
+			break;
+
+		iova += pgsize;
+		paddr += pgsize;
+		size -= pgsize;
+	}
+
+	/* unroll mapping in case something went wrong */
+	if (ret)
+		iommu_unmap(domain, orig_iova, orig_size - size);
 
-	return domain->ops->map(domain, iova, paddr, gfp_order, prot);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_map);
 
-int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 {
-	size_t size;
+	size_t unmapped_page, unmapped = 0;
+	unsigned int min_pagesz;
 
 	if (unlikely(domain->ops->unmap == NULL))
 		return -ENODEV;
 
-	size         = PAGE_SIZE << gfp_order;
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+	/*
+	 * The virtual address, as well as the size of the mapping, must be
+	 * aligned (at least) to the size of the smallest page supported
+	 * by the hardware
+	 */
+	if (!IS_ALIGNED(iova | size, min_pagesz)) {
+		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
+					iova, (unsigned long)size, min_pagesz);
+		return -EINVAL;
+	}
+
+	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
+							(unsigned long)size);
+
+	/*
+	 * Keep iterating until we either unmap 'size' bytes (or more)
+	 * or we hit an area that isn't mapped.
+	 */
+	while (unmapped < size) {
+		size_t left = size - unmapped;
+
+		unmapped_page = domain->ops->unmap(domain, iova, left);
+		if (!unmapped_page)
+			break;
 
-	BUG_ON(!IS_ALIGNED(iova, size));
+		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
+					(unsigned long)unmapped_page);
+
+		iova += unmapped_page;
+		unmapped += unmapped_page;
+	}
 
-	return domain->ops->unmap(domain, iova, gfp_order);
+	return unmapped;
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
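
The new iommu_map() loop repeatedly picks the largest page that (a) fits in the remaining size, (b) respects the alignment of both addresses, and (c) is advertised in pgsize_bitmap. The following stand-alone sketch replays that selection logic (hypothetical userspace C; fls_idx/ffs_idx are invented stand-ins for the kernel's __fls/__ffs, built on GCC/Clang builtins). With an MSM-style bitmap of 4K|64K|1M|16M, a 2 MiB region at 1 MiB-aligned addresses splits into two 1 MiB mappings:

#include <stdio.h>

/* Userspace stand-ins for the kernel's __fls()/__ffs() bit helpers. */
static unsigned int fls_idx(unsigned long x) { return 63 - __builtin_clzl(x); }
static unsigned int ffs_idx(unsigned long x) { return __builtin_ctzl(x); }

int main(void)
{
	/* 4K | 64K | 1M | 16M, like MSM_IOMMU_PGSIZES below */
	unsigned long bitmap = 0x1000 | 0x10000 | 0x100000 | 0x1000000;
	unsigned long iova = 0x100000, paddr = 0x40100000, size = 0x200000;

	while (size) {
		unsigned long addr_merge = iova | paddr;

		/* max page size that still fits into 'size' */
		unsigned int pgsize_idx = fls_idx(size);

		/* max page size allowed by the alignment of both addresses */
		if (addr_merge) {
			unsigned int align_idx = ffs_idx(addr_merge);

			if (align_idx < pgsize_idx)
				pgsize_idx = align_idx;
		}

		/* candidate sizes, filtered by the hardware's bitmap
		 * (the kernel BUG_ONs if nothing survives this mask) */
		unsigned long pgsize = ((1UL << (pgsize_idx + 1)) - 1) & bitmap;

		/* pick the biggest remaining candidate */
		pgsize = 1UL << fls_idx(pgsize);

		printf("map iova 0x%lx pa 0x%lx with pgsize 0x%lx\n",
		       iova, paddr, pgsize);

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}
	return 0;	/* prints two 1 MiB (0x100000) mappings */
}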
drivers/iommu/msm_iommu.c +12 −13
@@ -42,6 +42,9 @@ __asm__ __volatile__ ( \
 #define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
 #define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
 
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
 static int msm_iommu_tex_class[4];
 
 DEFINE_SPINLOCK(msm_iommu_lock);
@@ -352,7 +355,7 @@ fail:
 }
 
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
-			 phys_addr_t pa, int order, int prot)
+			 phys_addr_t pa, size_t len, int prot)
 {
 	struct msm_priv *priv;
 	unsigned long flags;
@@ -363,7 +366,6 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 	unsigned long *sl_pte;
 	unsigned long sl_offset;
 	unsigned int pgprot;
-	size_t len = 0x1000UL << order;
 	int ret = 0, tex, sh;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -463,8 +465,8 @@ fail:
 	return ret;
 }
 
-static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
-			    int order)
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+			    size_t len)
 {
 	struct msm_priv *priv;
 	unsigned long flags;
@@ -474,7 +476,6 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 	unsigned long *sl_table;
 	unsigned long *sl_pte;
 	unsigned long sl_offset;
-	size_t len = 0x1000UL << order;
 	int i, ret = 0;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -544,15 +545,12 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 
 	ret = __flush_iotlb(domain);
 
-	/*
-	 * the IOMMU API requires us to return the order of the unmapped
-	 * page (on success).
-	 */
-	if (!ret)
-		ret = order;
 fail:
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
-	return ret;
+
+	/* the IOMMU API requires us to return how many bytes were unmapped */
+	len = ret ? 0 : len;
+	return len;
 }
 
 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -684,7 +682,8 @@ static struct iommu_ops msm_iommu_ops = {
 	.map = msm_iommu_map,
 	.unmap = msm_iommu_unmap,
 	.iova_to_phys = msm_iommu_iova_to_phys,
-	.domain_has_cap = msm_iommu_domain_has_cap
+	.domain_has_cap = msm_iommu_domain_has_cap,
+	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
 };
 
 static int __init get_tex_class(int icp, int ocp, int mt, int nos)
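
After this change, a driver's map/unmap callbacks only ever see a size taken from its own pgsize_bitmap, and unmap reports back in bytes. A toy mock of that contract (hypothetical userspace C with invented names; not the kernel's struct iommu_ops):

#include <stdio.h>
#include <stddef.h>

/* Toy model of the reworked ops: sizes are bytes, unmap returns bytes. */
struct toy_iommu_ops {
	int    (*map)(unsigned long iova, unsigned long pa, size_t pgsize);
	size_t (*unmap)(unsigned long iova, size_t pgsize);
	unsigned long pgsize_bitmap;
};

static int toy_map(unsigned long iova, unsigned long pa, size_t pgsize)
{
	printf("driver maps 0x%lx -> 0x%lx (%zu bytes)\n", iova, pa, pgsize);
	return 0;
}

static size_t toy_unmap(unsigned long iova, size_t pgsize)
{
	printf("driver unmaps 0x%lx (%zu bytes)\n", iova, pgsize);
	return pgsize;	/* bytes actually unmapped */
}

static struct toy_iommu_ops ops = {
	.map = toy_map,
	.unmap = toy_unmap,
	.pgsize_bitmap = 0x1000 | 0x10000,	/* 4 KiB and 64 KiB only */
};

int main(void)
{
	/* The core guarantees pgsize is always drawn from pgsize_bitmap. */
	ops.map(0x10000, 0x40000000, 0x10000);	/* one 64 KiB page */
	ops.unmap(0x10000, 0x10000);
	return 0;
}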
drivers/iommu/omap-iommu.c +9 −9
@@ -33,6 +33,9 @@
 	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
 	     __i++)
 
+/* bitmap of the page sizes currently supported */
+#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
 /**
  * struct omap_iommu_domain - omap iommu domain
  * @pgtable:	the page table
@@ -1019,12 +1022,11 @@ static void iopte_cachep_ctor(void *iopte)
 }
 
 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
-			 phys_addr_t pa, int order, int prot)
+			 phys_addr_t pa, size_t bytes, int prot)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
-	size_t bytes = PAGE_SIZE << order;
 	struct iotlb_entry e;
 	int omap_pgsz;
 	u32 ret, flags;
@@ -1049,19 +1051,16 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 	return ret;
 }
 
-static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
-			    int order)
+static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+			    size_t size)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
-	size_t unmap_size;
-
-	dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
 
-	unmap_size = iopgtable_clear_entry(oiommu, da);
+	dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
 
-	return unmap_size ? get_order(unmap_size) : -EINVAL;
+	return iopgtable_clear_entry(oiommu, da);
 }
 
 static int
@@ -1211,6 +1210,7 @@ static struct iommu_ops omap_iommu_ops = {
 	.unmap		= omap_iommu_unmap,
 	.iova_to_phys	= omap_iommu_iova_to_phys,
 	.domain_has_cap	= omap_iommu_domain_has_cap,
+	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
 };
 
 static int __init omap_iommu_init(void)