
Commit ab7032bb authored by Joerg Roedel

iommu/amd: Remove need_flush from struct dma_ops_domain



The flushing of IOMMU TLBs is now done on a per-range basis,
so there is no longer any need for domain-wide flush tracking.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 2a87442c
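
With the flag gone, the flush decision moves to the point where DMA addresses are returned to an aperture range. The fragment below is a minimal sketch of that pattern only, not the driver code: free_addresses(), flush_domain_tlb() and wait_for_flush() are hypothetical stand-ins for dma_ops_free_addresses(), domain_flush_tlb() and domain_flush_complete(), and the structures are reduced to the fields the sketch needs.

#include <stdbool.h>

/* Reduced stand-in for the driver's aperture_range. */
struct aperture_range {
	unsigned long next_bit;	/* where the allocator continues searching */
};

/* Reduced stand-in for dma_ops_domain; the old code kept "bool need_flush" here. */
struct dma_ops_domain {
	struct aperture_range *aperture[8];
};

static bool amd_iommu_unmap_flush;	/* "flush on every unmap" switch, as in the driver */

static void flush_domain_tlb(struct dma_ops_domain *dom) { (void)dom; /* flush the IOTLB */ }
static void wait_for_flush(struct dma_ops_domain *dom)  { (void)dom; /* wait for completion */ }

/* Per-range flush decision made at free time, replacing the need_flush flag. */
static void free_addresses(struct dma_ops_domain *dom,
			   struct aperture_range *range,
			   unsigned long address, unsigned long pages)
{
	/*
	 * Flush if flushing on unmap is requested, or if the freed region
	 * lies beyond the range's next allocation point and could be handed
	 * out again before the allocator wraps around.
	 */
	if (amd_iommu_unmap_flush || (address + pages > range->next_bit)) {
		flush_domain_tlb(dom);
		wait_for_flush(dom);
	}

	/* ... release the corresponding bits in the range's bitmap ... */
}

On the map side, only the amd_iommu_np_cache flush remains, as the last hunks of the diff below show.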
+6 −24
@@ -151,9 +151,6 @@ struct dma_ops_domain {

	/* address space relevant data */
	struct aperture_range *aperture[APERTURE_MAX_RANGES];
-
-	/* This will be set to true when TLB needs to be flushed */
-	bool need_flush;
};

/****************************************************************************
@@ -1563,7 +1560,7 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
					unsigned long align_mask,
					u64 dma_mask)
{
-	unsigned long next_bit, boundary_size, mask;
+	unsigned long boundary_size, mask;
	unsigned long address = -1;
	int start = dom->next_index;
	int i;
@@ -1581,8 +1578,6 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
		if (!range || range->offset >= dma_mask)
			continue;

-		next_bit  = range->next_bit;
-
		address = dma_ops_aperture_alloc(dom, range, pages,
						 dma_mask, boundary_size,
						 align_mask);
@@ -1591,9 +1586,6 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
			dom->next_index = i;
			break;
		}
-
-		if (next_bit > range->next_bit)
-			dom->need_flush = true;
	}

	return address;
@@ -1609,7 +1601,6 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,

#ifdef CONFIG_IOMMU_STRESS
	dom->next_index = 0;
-	dom->need_flush = true;
#endif

	address = dma_ops_area_alloc(dev, dom, pages, align_mask, dma_mask);
@@ -1642,7 +1633,8 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
		return;
#endif

-	if (address + pages > range->next_bit) {
+	if (amd_iommu_unmap_flush ||
+	    (address + pages > range->next_bit)) {
		domain_flush_tlb(&dom->domain);
		domain_flush_complete(&dom->domain);
	}
@@ -1868,8 +1860,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
	if (!dma_dom->domain.pt_root)
		goto free_dma_dom;

-	dma_dom->need_flush = false;
-
	add_domain_to_list(&dma_dom->domain);

	if (alloc_new_range(dma_dom, true, GFP_KERNEL))
@@ -2503,11 +2493,10 @@ static dma_addr_t __map_single(struct device *dev,

	ADD_STATS_COUNTER(alloced_io_mem, size);

-	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
-		domain_flush_tlb(&dma_dom->domain);
-		dma_dom->need_flush = false;
-	} else if (unlikely(amd_iommu_np_cache))
+	if (unlikely(amd_iommu_np_cache)) {
		domain_flush_pages(&dma_dom->domain, address, size);
+		domain_flush_complete(&dma_dom->domain);
+	}

out:
	return address;
@@ -2519,8 +2508,6 @@ static dma_addr_t __map_single(struct device *dev,
		dma_ops_domain_unmap(dma_dom, start);
	}

-	domain_flush_pages(&dma_dom->domain, address, size);
-
	dma_ops_free_addresses(dma_dom, address, pages);

	return DMA_ERROR_CODE;
@@ -2553,11 +2540,6 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
		start += PAGE_SIZE;
	}

-	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
-		domain_flush_pages(&dma_dom->domain, flush_addr, size);
-		dma_dom->need_flush = false;
-	}
-
	SUB_STATS_COUNTER(alloced_io_mem, size);

	dma_ops_free_addresses(dma_dom, dma_addr, pages);