Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d79c4b94 authored by Patrick Daly
Browse files

iommu: arm-smmu: Don't call iommu unmap with zero size



If guard pages are disabled, don't call iommu_unmap for them.
Additionally, ensure the iova passed to __fast_smmu_free_iova()
is properly aligned.

Change-Id: I26cb6b354deb8a15458e5b7bf704c9c77d1c24ec
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
parent 9a423dbc
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -1224,12 +1224,12 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,

	addr = addr & PAGE_MASK;
	size = PAGE_ALIGN(size);
	if (mapping->min_iova_align)
	if (mapping->min_iova_align) {
		guard_len = ALIGN(size, mapping->min_iova_align) - size;
	else
		guard_len = 0;

		iommu_unmap(mapping->domain, addr + size, guard_len);
	} else {
		guard_len = 0;
	}

	start = (addr - mapping->base) >> PAGE_SHIFT;
	count = (size + guard_len) >> PAGE_SHIFT;
+4 −3
Original line number Diff line number Diff line
@@ -290,11 +290,12 @@ static void iommu_dma_free_iova(struct iommu_domain *domain,
	unsigned long shift = iova_shift(iovad);
	unsigned long guard_len;

	if (cookie->min_iova_align)
	if (cookie->min_iova_align) {
		guard_len = ALIGN(size, cookie->min_iova_align) - size;
	else
		guard_len = 0;
		iommu_unmap(domain, iova + size, guard_len);
	} else {
		guard_len = 0;
	}

	free_iova_fast(iovad, iova >> shift, (size + guard_len) >> shift);
}
+6 −5
Original line number Diff line number Diff line
@@ -311,13 +311,14 @@ static void __fast_smmu_free_iova(struct dma_fast_smmu_mapping *mapping,
	unsigned long nbits;
	unsigned long guard_len;

	if (mapping->min_iova_align)
	if (mapping->min_iova_align) {
		guard_len = ALIGN(size, mapping->min_iova_align) - size;
	else
		iommu_unmap(mapping->domain, iova + size, guard_len);
	} else {
		guard_len = 0;
	}
	nbits = (size + guard_len) >> FAST_PAGE_SHIFT;

	iommu_unmap(mapping->domain, iova + size, guard_len);

	/*
	 * We don't invalidate TLBs on unmap.  We invalidate TLBs on map
@@ -436,7 +437,7 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
	spin_lock_irqsave(&mapping->lock, flags);
	av8l_fast_unmap_public(pmd, len);
	fast_dmac_clean_range(mapping, pmd, pmd + nptes);
	__fast_smmu_free_iova(mapping, iova, len);
	__fast_smmu_free_iova(mapping, iova - offset, len);
	spin_unlock_irqrestore(&mapping->lock, flags);

	trace_unmap(mapping->domain, iova - offset, len, len);
@@ -744,7 +745,7 @@ static void fast_smmu_dma_unmap_resource(

	iommu_unmap(mapping->domain, addr - offset, len);
	spin_lock_irqsave(&mapping->lock, flags);
	__fast_smmu_free_iova(mapping, addr, len);
	__fast_smmu_free_iova(mapping, addr - offset, len);
	spin_unlock_irqrestore(&mapping->lock, flags);
}