
Commit eb5ecd1a authored by Suravee Suthikulpanit, committed by Joerg Roedel

iommu/amd: Add support for fast IOTLB flushing



Since the AMD IOMMU driver currently flushes all TLB entries whenever
the page size is more than one, use the same interface for both
iommu_ops.flush_iotlb_all() and iommu_ops.iotlb_sync().

Cc: Joerg Roedel <joro@8bytes.org>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent df42a04b
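
For context, here is a minimal sketch of how a caller of the generic IOMMU API
can take advantage of this change. It assumes the iommu_unmap_fast(),
iommu_tlb_range_add() and iommu_tlb_sync() helpers from <linux/iommu.h> of this
kernel generation; the wrapper unmap_buffer_deferred() is a hypothetical name
used only for illustration and is not part of this commit. With the per-unmap
flush removed from amd_iommu_unmap() (see the diff below), the caller chooses
when the single whole-domain flush happens:

#include <linux/iommu.h>

/* Hypothetical wrapper: unmap a range and defer the IOTLB flush. */
static void unmap_buffer_deferred(struct iommu_domain *domain,
				  unsigned long iova, size_t size)
{
	size_t unmapped;

	/* Unmap without flushing the IOTLB on every call. */
	unmapped = iommu_unmap_fast(domain, iova, size);

	/* Record the range to invalidate; with this commit the AMD
	 * driver's iotlb_range_add callback is an empty stub. */
	iommu_tlb_range_add(domain, iova, unmapped);

	/* One flush at the end: the AMD driver services this via
	 * amd_iommu_flush_iotlb_all(), which flushes the whole domain
	 * TLB and waits for completion. */
	iommu_tlb_sync(domain);
}
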
+16 −3
@@ -3056,9 +3056,6 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
	unmap_size = iommu_unmap_page(domain, iova, page_size);
	mutex_unlock(&domain->api_lock);

-	domain_flush_tlb_pde(domain);
-	domain_flush_complete(domain);
-
	return unmap_size;
}

@@ -3176,6 +3173,19 @@ static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
	return dev_data->defer_attach;
}

+static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+	struct protection_domain *dom = to_pdomain(domain);
+
+	domain_flush_tlb_pde(dom);
+	domain_flush_complete(dom);
+}
+
+static void amd_iommu_iotlb_range_add(struct iommu_domain *domain,
+				      unsigned long iova, size_t size)
+{
+}
+
const struct iommu_ops amd_iommu_ops = {
	.capable = amd_iommu_capable,
	.domain_alloc = amd_iommu_domain_alloc,
@@ -3194,6 +3204,9 @@ const struct iommu_ops amd_iommu_ops = {
	.apply_resv_region = amd_iommu_apply_resv_region,
	.is_attach_deferred = amd_iommu_is_attach_deferred,
	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
+	.flush_iotlb_all = amd_iommu_flush_iotlb_all,
+	.iotlb_range_add = amd_iommu_iotlb_range_add,
+	.iotlb_sync = amd_iommu_flush_iotlb_all,
};

/*****************************************************************************
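
Note on the ops wiring above: both .flush_iotlb_all and .iotlb_sync point at
amd_iommu_flush_iotlb_all(), because the driver always flushes the whole domain
TLB rather than individual ranges, which is also why .iotlb_range_add is left
as an empty stub. For reference, the core helpers of this kernel generation
dispatch to these callbacks roughly as follows (paraphrased from
include/linux/iommu.h; treat the exact bodies as an approximation):

static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_tlb_range_add(struct iommu_domain *domain,
				       unsigned long iova, size_t size)
{
	if (domain->ops->iotlb_range_add)
		domain->ops->iotlb_range_add(domain, iova, size);
}

static inline void iommu_tlb_sync(struct iommu_domain *domain)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain);
}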