Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5d214fe6 authored by Joerg Roedel
Browse files

x86/amd-iommu: Protect IOMMU-API map/unmap path



This patch introduces a mutex to lock page table updates in
the IOMMU-API path. We can't use the spin_lock here because
this path might sleep.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent 339d3261
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@
#define _ASM_X86_AMD_IOMMU_TYPES_H

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>

@@ -237,6 +238,7 @@ struct protection_domain {
	struct list_head list;  /* for list of all protection domains */
	struct list_head dev_list; /* List of all devices in this domain */
	spinlock_t lock;	/* mostly used to lock the page table*/
	struct mutex api_lock;	/* protect page tables in the iommu-api path */
	u16 id;			/* the domain id written to the device table */
	int mode;		/* paging mode (0-6 levels) */
	u64 *pt_root;		/* page table root pointer */
+9 −0
Original line number Diff line number Diff line
@@ -2327,6 +2327,7 @@ static struct protection_domain *protection_domain_alloc(void)
		return NULL;

	spin_lock_init(&domain->lock);
	mutex_init(&domain->api_lock);
	domain->id = domain_id_alloc();
	if (!domain->id)
		goto out_err;
@@ -2456,6 +2457,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
	iova  &= PAGE_MASK;
	paddr &= PAGE_MASK;

	mutex_lock(&domain->api_lock);

	for (i = 0; i < npages; ++i) {
		ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
		if (ret)
@@ -2465,6 +2468,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
		paddr += PAGE_SIZE;
	}

	mutex_unlock(&domain->api_lock);

	return 0;
}

@@ -2477,12 +2482,16 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,

	iova  &= PAGE_MASK;

	mutex_lock(&domain->api_lock);

	for (i = 0; i < npages; ++i) {
		iommu_unmap_page(domain, iova, PM_MAP_4k);
		iova  += PAGE_SIZE;
	}

	iommu_flush_tlb_pde(domain);

	mutex_unlock(&domain->api_lock);
}

static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,