Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fd62190a authored by Joerg Roedel
Browse files

iommu/amd: Make use of the per-domain flush queue



Fill the flush-queue on unmap and only flush the IOMMU and
device TLBs when a per-cpu queue gets full.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent d4241a27
Loading
Loading
Loading
Loading
+56 −4
Original line number Diff line number Diff line
@@ -1806,6 +1806,61 @@ static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom)
	return 0;
}

/*
 * Returns true when the ring buffer cannot accept another entry,
 * i.e. advancing tail by one slot would make it collide with head.
 * One slot is always kept empty to distinguish full from empty.
 */
static inline bool queue_ring_full(struct flush_queue *queue)
{
	unsigned next_tail = (queue->tail + 1) % FLUSH_QUEUE_SIZE;

	return next_tail == queue->head;
}

/*
 * Iterate over every occupied slot in flush-queue @q, from head up to
 * (but not including) tail, with the index @i wrapping around at
 * FLUSH_QUEUE_SIZE.
 */
#define queue_ring_for_each(i, q) \
	for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE)

static void queue_release(struct dma_ops_domain *dom,
			  struct flush_queue *queue)
{
	unsigned i;

	queue_ring_for_each(i, queue)
		free_iova_fast(&dom->iovad,
			       queue->entries[i].iova_pfn,
			       queue->entries[i].pages);

	queue->head = queue->tail = 0;
}

/*
 * Claim the current tail slot of @queue and advance tail by one,
 * wrapping around at FLUSH_QUEUE_SIZE.  Returns the claimed index;
 * the caller fills in the entry at that slot.
 */
static inline unsigned queue_ring_add(struct flush_queue *queue)
{
	unsigned claimed = queue->tail;

	queue->tail = (claimed + 1) % FLUSH_QUEUE_SIZE;
	return claimed;
}

/*
 * Defer the TLB flush for an unmapped IOVA range: record the range in
 * this CPU's flush queue instead of flushing immediately.  Only when
 * the per-cpu ring fills up are the domain's TLBs flushed and all
 * queued IOVA ranges released back to the allocator.
 */
static void queue_add(struct dma_ops_domain *dom,
		      unsigned long address, unsigned long pages)
{
	struct flush_queue *queue;
	int idx;

	/*
	 * IOVAs were allocated in power-of-two page chunks; round up so
	 * the eventual free_iova_fast() matches the original allocation.
	 */
	pages     = __roundup_pow_of_two(pages);
	address >>= PAGE_SHIFT;

	/* Disables preemption until the matching put_cpu_ptr() below. */
	queue = get_cpu_ptr(dom->flush_queue);

	/*
	 * Ring full: flush the TLBs now, then return every queued IOVA
	 * range to the allocator, which empties the ring.
	 */
	if (queue_ring_full(queue)) {
		domain_flush_tlb(&dom->domain);
		domain_flush_complete(&dom->domain);
		queue_release(dom, queue);
	}

	idx = queue_ring_add(queue);

	queue->entries[idx].iova_pfn = address;
	queue->entries[idx].pages    = pages;

	put_cpu_ptr(dom->flush_queue);
}

/*
 * Free a domain, only used if something went wrong in the
 * allocation path and we need to free an already allocated page table
@@ -2454,10 +2509,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
		domain_flush_tlb(&dma_dom->domain);
		domain_flush_complete(&dma_dom->domain);
	} else {
		/* Keep the if() around, we need it later again */
		dma_ops_free_iova(dma_dom, dma_addr, pages);
		domain_flush_tlb(&dma_dom->domain);
		domain_flush_complete(&dma_dom->domain);
		queue_add(dma_dom, dma_addr, pages);
	}
}