Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 63b1ef6c, authored by qctecmdr and committed by Gerrit (the friendly Code Review server)
Browse files

Merge "iommu/iova: Free global iova rcache on iova alloc failure"

parents e21de0b6 adbe0da1
Loading
Loading
Loading
Loading
+40 −6
Original line number Diff line number Diff line
@@ -212,8 +212,9 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
	struct rb_node *curr, *prev;
	struct iova *curr_iova;
	unsigned long flags;
	unsigned long new_pfn;
	unsigned long new_pfn, low_pfn_new;
	unsigned long align_mask = ~0UL;
	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;

	if (size_aligned)
		align_mask <<= limit_align(iovad, fls_long(size - 1));
@@ -222,15 +223,25 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	curr = __get_cached_rbnode(iovad, limit_pfn);
	curr_iova = rb_entry(curr, struct iova, node);
	low_pfn_new = curr_iova->pfn_hi + 1;

retry:
	do {
		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
		new_pfn = (limit_pfn - size) & align_mask;
		high_pfn = min(high_pfn, curr_iova->pfn_lo);
		new_pfn = (high_pfn - size) & align_mask;
		prev = curr;
		curr = rb_prev(curr);
		curr_iova = rb_entry(curr, struct iova, node);
	} while (curr && new_pfn <= curr_iova->pfn_hi);
	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);

	if (limit_pfn < size || new_pfn < iovad->start_pfn) {
	if (high_pfn < size || new_pfn < low_pfn) {
		if (low_pfn == iovad->start_pfn && low_pfn_new < limit_pfn) {
			high_pfn = limit_pfn;
			low_pfn = low_pfn_new;
			curr = &iovad->anchor.node;
			curr_iova = rb_entry(curr, struct iova, node);
			goto retry;
		}
		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
		return -ENOMEM;
	}
@@ -521,6 +532,7 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		flush_rcache = false;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		free_global_cached_iovas(iovad);
		goto retry;
	}

@@ -1134,5 +1146,27 @@ void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
	}
}

/*
 * free_global_cached_iovas - free all the IOVA ranges held in the global
 * (cross-CPU) depot of every rcache size class of @iovad.
 *
 * Called as a last-resort when IOVA allocation still fails after the
 * per-CPU caches have been flushed (see the alloc_iova_fast() retry path),
 * so that the cached ranges become available to the retried allocation.
 */
void free_global_cached_iovas(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	unsigned long flags;
	int i, j;

	/* Walk every size class of the range cache. */
	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		/* The depot is shared across CPUs; serialize on the rcache lock. */
		spin_lock_irqsave(&rcache->lock, flags);
		for (j = 0; j < rcache->depot_size; ++j) {
			/* Release the magazine's cached PFN ranges back to the domain... */
			iova_magazine_free_pfns(rcache->depot[j], iovad);
			/* ...then free the (now empty) magazine itself. */
			iova_magazine_free(rcache->depot[j]);
			rcache->depot[j] = NULL;
		}
		/* Depot is now empty; reset the count under the same lock. */
		rcache->depot_size = 0;
		spin_unlock_irqrestore(&rcache->lock, flags);
	}
}

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");
+6 −0
Original line number Diff line number Diff line
@@ -166,6 +166,7 @@ void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
void free_global_cached_iovas(struct iova_domain *iovad);
#else
static inline int iova_cache_get(void)
{
@@ -273,6 +274,11 @@ static inline void free_cpu_cached_iovas(unsigned int cpu,
					 struct iova_domain *iovad)
{
}

/* No-op stub for configurations where the IOVA allocator is compiled out
 * (the #else branch of this header); keeps callers building unchanged. */
static inline void free_global_cached_iovas(struct iova_domain *iovad)
{
}

#endif

#endif