
Commit 8c25ab8b authored by Linus Torvalds
Pull IOVA fixes from David Woodhouse:
 "The main fix here is the first one, fixing the over-allocation of
   size-aligned requests.  The other patches simply make the existing
  IOVA code available to users other than the Intel VT-d driver, with no
  functional change.

  I concede the latter really *should* have been submitted during the
  merge window, but since it's basically risk-free and people are
  waiting to build on top of it and it's my fault I didn't get it in, I
  (and they) would be grateful if you'd take it"

* git://git.infradead.org/intel-iommu:
  iommu: Make the iova library a module
  iommu: iova: Export symbols
  iommu: iova: Move iova cache management to the iova library
  iommu/iova: Avoid over-allocating when size-aligned
parents bde17b90 15bbdec3
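
With the IOVA allocator now exported, an IOMMU driver other than intel-iommu can manage its own IOVA space. Below is a minimal sketch of such a consumer; the names (my_iova_domain, my_consumer_init, my_alloc_range) and the granule/limit values are illustrative assumptions, not part of this commit.

#include <linux/dma-mapping.h>
#include <linux/iova.h>
#include <linux/sizes.h>

static struct iova_domain my_iova_domain;	/* hypothetical consumer state */

static int my_consumer_init(void)
{
	int ret;

	/* Take a reference on the shared "iommu_iova" kmem_cache */
	ret = iova_cache_get();
	if (ret)
		return ret;

	/* 4K IOVA granule, allocations start at PFN 1, 32-bit limit at 4G */
	init_iova_domain(&my_iova_domain, SZ_4K, 1,
			 DMA_BIT_MASK(32) >> PAGE_SHIFT);
	return 0;
}

/* Reserve a size-aligned range of 'nrpages' IOVA pages below 4G */
static struct iova *my_alloc_range(unsigned long nrpages)
{
	return alloc_iova(&my_iova_domain, nrpages,
			  DMA_BIT_MASK(32) >> PAGE_SHIFT, true);
}

static void my_consumer_exit(void)
{
	put_iova_domain(&my_iova_domain);
	iova_cache_put();	/* drop the cache reference taken in init */
}

The iova_cache_get()/iova_cache_put() pair reference-counts the shared 'struct iova' slab cache, so several such users can coexist with the Intel VT-d driver.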
drivers/iommu/Kconfig  +1 −1
@@ -43,7 +43,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST
 endmenu
 
 config IOMMU_IOVA
-	bool
+	tristate
 
 config OF_IOMMU
        def_bool y
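
Because IOMMU_IOVA is now tristate, the library can be built as a module and pulled in by other drivers through select. A hypothetical Kconfig entry for such a consumer (the symbol name is made up for illustration):

config MY_IOMMU_DRIVER
	tristate "Hypothetical IOMMU driver built on the IOVA library"
	depends on IOMMU_SUPPORT
	select IOMMU_IOVA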
drivers/iommu/intel-iommu.c  +5 −3
@@ -3215,6 +3215,8 @@ static struct iova *intel_alloc_iova(struct device *dev,
 
 	/* Restrict dma_mask to the width that the iommu can handle */
 	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+	/* Ensure we reserve the whole size-aligned region */
+	nrpages = __roundup_pow_of_two(nrpages);
 
 	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
 		/*
@@ -3711,7 +3713,7 @@ static inline int iommu_devinfo_cache_init(void)
 static int __init iommu_init_mempool(void)
 {
 	int ret;
-	ret = iommu_iova_cache_init();
+	ret = iova_cache_get();
 	if (ret)
 		return ret;

@@ -3725,7 +3727,7 @@ static int __init iommu_init_mempool(void)
 
 	kmem_cache_destroy(iommu_domain_cache);
 domain_error:
-	iommu_iova_cache_destroy();
+	iova_cache_put();
 
 	return -ENOMEM;
 }
@@ -3734,7 +3736,7 @@ static void __init iommu_exit_mempool(void)
 {
 	kmem_cache_destroy(iommu_devinfo_cache);
 	kmem_cache_destroy(iommu_domain_cache);
-	iommu_iova_cache_destroy();
+	iova_cache_put();
 }
 
 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
drivers/iommu/iova.c  +69 −51
@@ -18,42 +18,9 @@
  */
 
 #include <linux/iova.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 
-static struct kmem_cache *iommu_iova_cache;
-
-int iommu_iova_cache_init(void)
-{
-	int ret = 0;
-
-	iommu_iova_cache = kmem_cache_create("iommu_iova",
-					 sizeof(struct iova),
-					 0,
-					 SLAB_HWCACHE_ALIGN,
-					 NULL);
-	if (!iommu_iova_cache) {
-		pr_err("Couldn't create iova cache\n");
-		ret = -ENOMEM;
-	}
-
-	return ret;
-}
-
-void iommu_iova_cache_destroy(void)
-{
-	kmem_cache_destroy(iommu_iova_cache);
-}
-
-struct iova *alloc_iova_mem(void)
-{
-	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
-}
-
-void free_iova_mem(struct iova *iova)
-{
-	kmem_cache_free(iommu_iova_cache, iova);
-}
-
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	unsigned long start_pfn, unsigned long pfn_32bit)
@@ -72,6 +39,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = pfn_32bit;
 }
+EXPORT_SYMBOL_GPL(init_iova_domain);
 
 static struct rb_node *
 __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
@@ -120,19 +88,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 	}
 }
 
-/* Computes the padding size required, to make the
- * the start address naturally aligned on its size
+/*
+ * Computes the padding size required, to make the start address
+ * naturally aligned on the power-of-two order of its size
  */
-static int
-iova_get_pad_size(int size, unsigned int limit_pfn)
+static unsigned int
+iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
 {
-	unsigned int pad_size = 0;
-	unsigned int order = ilog2(size);
-
-	if (order)
-		pad_size = (limit_pfn + 1) % (1 << order);
-
-	return pad_size;
+	return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
 }
 
 static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
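
Previously alloc_iova() rounded a size-aligned request up to the next power of two before reserving it, so, for example, a 9-page request consumed 16 IOVA pages. The rewritten helper keeps the requested size and only pads the start so it lands on the power-of-two boundary. A rough worked example with illustrative values, using the pfn_lo arithmetic from __alloc_and_insert_iova_range() (not shown in this diff): for limit_pfn = 0xfffff and size = 9, pad = (0x100000 - 9) & 15 = 7, so pfn_lo = limit_pfn - (size + pad) + 1 = 0xffff0, which is aligned to 16 pages, while only the 9 requested pages are reserved instead of 16.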
@@ -242,6 +205,57 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
 	rb_insert_color(&iova->node, root);
 }
 
+static struct kmem_cache *iova_cache;
+static unsigned int iova_cache_users;
+static DEFINE_MUTEX(iova_cache_mutex);
+
+struct iova *alloc_iova_mem(void)
+{
+	return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(alloc_iova_mem);
+
+void free_iova_mem(struct iova *iova)
+{
+	kmem_cache_free(iova_cache, iova);
+}
+EXPORT_SYMBOL(free_iova_mem);
+
+int iova_cache_get(void)
+{
+	mutex_lock(&iova_cache_mutex);
+	if (!iova_cache_users) {
+		iova_cache = kmem_cache_create(
+			"iommu_iova", sizeof(struct iova), 0,
+			SLAB_HWCACHE_ALIGN, NULL);
+		if (!iova_cache) {
+			mutex_unlock(&iova_cache_mutex);
+			printk(KERN_ERR "Couldn't create iova cache\n");
+			return -ENOMEM;
+		}
+	}
+
+	iova_cache_users++;
+	mutex_unlock(&iova_cache_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iova_cache_get);
+
+void iova_cache_put(void)
+{
+	mutex_lock(&iova_cache_mutex);
+	if (WARN_ON(!iova_cache_users)) {
+		mutex_unlock(&iova_cache_mutex);
+		return;
+	}
+	iova_cache_users--;
+	if (!iova_cache_users)
+		kmem_cache_destroy(iova_cache);
+	mutex_unlock(&iova_cache_mutex);
+}
+EXPORT_SYMBOL_GPL(iova_cache_put);
+
 /**
  * alloc_iova - allocates an iova
  * @iovad: - iova domain in question
@@ -265,12 +279,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	if (!new_iova)
 		return NULL;
 
-	/* If size aligned is set then round the size to
-	 * to next power of two.
-	 */
-	if (size_aligned)
-		size = __roundup_pow_of_two(size);
-
 	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
 			new_iova, size_aligned);
 
@@ -281,6 +289,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 
 	return new_iova;
 }
+EXPORT_SYMBOL_GPL(alloc_iova);
 
 /**
  * find_iova - find's an iova for a given pfn
@@ -321,6 +330,7 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(find_iova);
 
 /**
  * __free_iova - frees the given iova
@@ -339,6 +349,7 @@ __free_iova(struct iova_domain *iovad, struct iova *iova)
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	free_iova_mem(iova);
 }
+EXPORT_SYMBOL_GPL(__free_iova);
 
 /**
  * free_iova - finds and frees the iova for a given pfn
@@ -356,6 +367,7 @@ free_iova(struct iova_domain *iovad, unsigned long pfn)
 		__free_iova(iovad, iova);
 
 }
+EXPORT_SYMBOL_GPL(free_iova);
 
 /**
  * put_iova_domain - destroys the iova doamin
@@ -378,6 +390,7 @@ void put_iova_domain(struct iova_domain *iovad)
 	}
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 }
+EXPORT_SYMBOL_GPL(put_iova_domain);
 
 static int
 __is_range_overlap(struct rb_node *node,
@@ -467,6 +480,7 @@ reserve_iova(struct iova_domain *iovad,
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return iova;
 }
+EXPORT_SYMBOL_GPL(reserve_iova);
 
 /**
  * copy_reserved_iova - copies the reserved between domains
@@ -493,6 +507,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 	}
 	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
+EXPORT_SYMBOL_GPL(copy_reserved_iova);
 
 struct iova *
 split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
@@ -534,3 +549,6 @@ split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
 		free_iova_mem(prev);
 	return NULL;
 }
+
+MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
+MODULE_LICENSE("GPL");
include/linux/iova.h  +2 −2
@@ -68,8 +68,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
 	return iova >> iova_shift(iovad);
 }
 
-int iommu_iova_cache_init(void);
-void iommu_iova_cache_destroy(void);
+int iova_cache_get(void);
+void iova_cache_put(void);
 
 struct iova *alloc_iova_mem(void);
 void free_iova_mem(struct iova *iova);