Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2e10967e authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "iommu: arm-smmu: Don't call iommu unmap with zero size"

parents b860f6fa d79c4b94
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -1224,12 +1224,12 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,

	addr = addr & PAGE_MASK;
	size = PAGE_ALIGN(size);
	if (mapping->min_iova_align)
	if (mapping->min_iova_align) {
		guard_len = ALIGN(size, mapping->min_iova_align) - size;
	else
		guard_len = 0;

		iommu_unmap(mapping->domain, addr + size, guard_len);
	} else {
		guard_len = 0;
	}

	start = (addr - mapping->base) >> PAGE_SHIFT;
	count = (size + guard_len) >> PAGE_SHIFT;
@@ -1979,7 +1979,7 @@ bitmap_iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping)
{
	unsigned int bitmap_size = BITS_TO_LONGS(mapping->bits) * sizeof(long);
	int vmid = VMID_HLOS;
	bool min_iova_align = 0;
	int min_iova_align = 0;

	iommu_domain_get_attr(mapping->domain,
			DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
+4 −3
Original line number Diff line number Diff line
@@ -290,11 +290,12 @@ static void iommu_dma_free_iova(struct iommu_domain *domain,
	unsigned long shift = iova_shift(iovad);
	unsigned long guard_len;

	if (cookie->min_iova_align)
	if (cookie->min_iova_align) {
		guard_len = ALIGN(size, cookie->min_iova_align) - size;
	else
		guard_len = 0;
		iommu_unmap(domain, iova + size, guard_len);
	} else {
		guard_len = 0;
	}

	free_iova_fast(iovad, iova >> shift, (size + guard_len) >> shift);
}
+6 −5
Original line number Diff line number Diff line
@@ -311,13 +311,14 @@ static void __fast_smmu_free_iova(struct dma_fast_smmu_mapping *mapping,
	unsigned long nbits;
	unsigned long guard_len;

	if (mapping->min_iova_align)
	if (mapping->min_iova_align) {
		guard_len = ALIGN(size, mapping->min_iova_align) - size;
	else
		iommu_unmap(mapping->domain, iova + size, guard_len);
	} else {
		guard_len = 0;
	}
	nbits = (size + guard_len) >> FAST_PAGE_SHIFT;

	iommu_unmap(mapping->domain, iova + size, guard_len);

	/*
	 * We don't invalidate TLBs on unmap.  We invalidate TLBs on map
@@ -436,7 +437,7 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
	spin_lock_irqsave(&mapping->lock, flags);
	av8l_fast_unmap_public(pmd, len);
	fast_dmac_clean_range(mapping, pmd, pmd + nptes);
	__fast_smmu_free_iova(mapping, iova, len);
	__fast_smmu_free_iova(mapping, iova - offset, len);
	spin_unlock_irqrestore(&mapping->lock, flags);

	trace_unmap(mapping->domain, iova - offset, len, len);
@@ -744,7 +745,7 @@ static void fast_smmu_dma_unmap_resource(

	iommu_unmap(mapping->domain, addr - offset, len);
	spin_lock_irqsave(&mapping->lock, flags);
	__fast_smmu_free_iova(mapping, addr, len);
	__fast_smmu_free_iova(mapping, addr - offset, len);
	spin_unlock_irqrestore(&mapping->lock, flags);
}