Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4de354ec authored by Lu Baolu's avatar Lu Baolu Committed by Joerg Roedel
Browse files

iommu/vt-d: Delegate the identity domain to upper layer



This allows the iommu generic layer to allocate an identity domain
and attach it to a device. Hence, the identity domain is delegated
to the upper layer. As a side effect, iommu_identity_mapping can't be
used to check the existence of identity domains any more.

Signed-off-by: James Sewart <jamessewart@arista.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent f273a453
Loading
Loading
Loading
Loading
+58 −32
Original line number Diff line number Diff line
@@ -350,6 +350,7 @@ static void domain_context_clear(struct intel_iommu *iommu,
				 struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);
static bool device_is_rmrr_locked(struct device *dev);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
@@ -2808,7 +2809,9 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_init(int hw)
{
	int nid, ret;
	struct dmar_rmrr_unit *rmrr;
	struct device *dev;
	int i, nid, ret;

	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
	if (!si_domain)
@@ -2819,8 +2822,6 @@ static int __init si_domain_init(int hw)
		return -EFAULT;
	}

	pr_debug("Identity mapping domain allocated\n");

	if (hw)
		return 0;

@@ -2836,6 +2837,31 @@ static int __init si_domain_init(int hw)
		}
	}

	/*
	 * Normally we use DMA domains for devices which have RMRRs. But we
	 * lose this requirement for graphics and USB devices. Identity map
	 * the RMRRs for graphics and USB devices so that they could use the
	 * si_domain.
	 */
	for_each_rmrr_units(rmrr) {
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, dev) {
			unsigned long long start = rmrr->base_address;
			unsigned long long end = rmrr->end_address;

			if (device_is_rmrr_locked(dev))
				continue;

			if (WARN_ON(end < start ||
				    end >> agaw_to_width(si_domain->agaw)))
				continue;

			ret = iommu_domain_identity_map(si_domain, start, end);
			if (ret)
				return ret;
		}
	}

	return 0;
}

@@ -2843,9 +2869,6 @@ static int identity_mapping(struct device *dev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = dev->archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);
@@ -3431,11 +3454,9 @@ static int __init init_dmars(void)

	check_tylersburg_isoch();

	if (iommu_identity_mapping) {
	ret = si_domain_init(hw_pass_through);
	if (ret)
		goto free_iommu;
	}


	/*
@@ -3628,9 +3649,6 @@ static bool iommu_need_mapping(struct device *dev)
	if (iommu_dummy(dev))
		return false;

	if (!iommu_identity_mapping)
		return true;

	found = identity_mapping(dev);
	if (found) {
		if (iommu_should_identity_map(dev, 0))
@@ -5051,9 +5069,8 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
	struct dmar_domain *dmar_domain;
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
		if (!dmar_domain) {
			pr_err("Can't allocate dmar_domain\n");
@@ -5068,14 +5085,23 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)

		domain = &dmar_domain->domain;
		domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
		domain->geometry.aperture_end   =
				__DOMAIN_MAX_ADDR(dmar_domain->gaw);
		domain->geometry.force_aperture = true;

		return domain;
	case IOMMU_DOMAIN_IDENTITY:
		return &si_domain->domain;
	default:
		return NULL;
	}

	return NULL;
}

/*
 * Release a domain previously handed out by intel_iommu_domain_alloc().
 * The static identity domain (si_domain) is shared by all identity-mapped
 * devices and lives for the lifetime of the driver, so it is deliberately
 * never torn down here; every other domain is released via domain_exit().
 */
static void intel_iommu_domain_free(struct iommu_domain *domain)
{
	if (domain != &si_domain->domain)
		domain_exit(to_dmar_domain(domain));
}