
Commit aafd8ba0 authored by Joerg Roedel

iommu/amd: Implement add_device and remove_device



Implement these two iommu-ops call-backs to make use of the
initialization and notifier features of the iommu core.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 063071df
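
The iommu core drives these two callbacks from its own bus notifier, which is what lets this patch drop the AMD driver's private notifier below. As a rough sketch of the core-side dispatch (simplified from the iommu core of this era, not code that is part of this patch):

/*
 * Simplified sketch of the core-side dispatch (modelled on the
 * iommu core of this kernel generation, not literal
 * drivers/iommu/iommu.c code): once a driver publishes its
 * iommu_ops, the core's bus notifier routes device add/remove
 * events into the two new callbacks.
 */
static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (ops->add_device)
			return ops->add_device(dev);
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		if (ops->remove_device)
			ops->remove_device(dev);
	}

	return 0;
}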
drivers/iommu/amd_iommu.c  +54 −156
@@ -119,7 +119,7 @@ struct iommu_cmd {
 struct kmem_cache *amd_iommu_irq_cache;

 static void update_domain(struct protection_domain *domain);
-static int __init alloc_passthrough_domain(void);
+static int alloc_passthrough_domain(void);

 /****************************************************************************
  *
@@ -434,64 +434,15 @@ static void iommu_uninit_device(struct device *dev)
 	/* Unlink from alias, it may change if another device is re-plugged */
 	dev_data->alias_data = NULL;

+	/* Remove dma-ops */
+	dev->archdata.dma_ops = NULL;
+
 	/*
 	 * We keep dev_data around for unplugged devices and reuse it when the
 	 * device is re-plugged - not doing so would introduce a ton of races.
 	 */
 }

-void __init amd_iommu_uninit_devices(void)
-{
-	struct iommu_dev_data *dev_data, *n;
-	struct pci_dev *pdev = NULL;
-
-	for_each_pci_dev(pdev) {
-
-		if (!check_device(&pdev->dev))
-			continue;
-
-		iommu_uninit_device(&pdev->dev);
-	}
-
-	/* Free all of our dev_data structures */
-	list_for_each_entry_safe(dev_data, n, &dev_data_list, dev_data_list)
-		free_dev_data(dev_data);
-}
-
-int __init amd_iommu_init_devices(void)
-{
-	struct pci_dev *pdev = NULL;
-	int ret = 0;
-
-	for_each_pci_dev(pdev) {
-
-		if (!check_device(&pdev->dev))
-			continue;
-
-		ret = iommu_init_device(&pdev->dev);
-		if (ret == -ENOTSUPP)
-			iommu_ignore_device(&pdev->dev);
-		else if (ret)
-			goto out_free;
-	}
-
-	/*
-	 * Initialize IOMMU groups only after iommu_init_device() has
-	 * had a chance to populate any IVRS defined aliases.
-	 */
-	for_each_pci_dev(pdev) {
-		if (check_device(&pdev->dev))
-			init_iommu_group(&pdev->dev);
-	}
-
-	return 0;
-
-out_free:
-
-	amd_iommu_uninit_devices();
-
-	return ret;
-}
 #ifdef CONFIG_AMD_IOMMU_STATS

 /*
@@ -2402,40 +2353,37 @@ static struct protection_domain *domain_for_device(struct device *dev)
 	return dom;
 }

-static int device_change_notifier(struct notifier_block *nb,
-				  unsigned long action, void *data)
+static int amd_iommu_add_device(struct device *dev)
 {
 	struct dma_ops_domain *dma_domain;
 	struct protection_domain *domain;
 	struct iommu_dev_data *dev_data;
-	struct device *dev = data;
 	struct amd_iommu *iommu;
 	unsigned long flags;
 	u16 devid;
+	int ret;

-	if (!check_device(dev))
+	if (!check_device(dev) || get_dev_data(dev))
 		return 0;

 	devid = get_device_id(dev);
 	iommu = amd_iommu_rlookup_table[devid];
-	dev_data = get_dev_data(dev);

-	switch (action) {
-	case BUS_NOTIFY_ADD_DEVICE:
-
-		iommu_init_device(dev);
+	ret = iommu_init_device(dev);
+	if (ret == -ENOTSUPP) {
+		iommu_ignore_device(dev);
+		goto out;
+	}
+	init_iommu_group(dev);

-		/*
-		 * dev_data is still NULL and
-		 * got initialized in iommu_init_device
-		 */
-		dev_data = get_dev_data(dev);
+	dev_data = get_dev_data(dev);

 	if (iommu_pass_through || dev_data->iommu_v2) {
 		/* Make sure passthrough domain is allocated */
 		alloc_passthrough_domain();
 		dev_data->passthrough = true;
 		attach_device(dev, pt_domain);
-			break;
+		goto out;
 	}

 	domain = domain_for_device(dev);
@@ -2448,35 +2396,36 @@ static int device_change_notifier(struct notifier_block *nb,
 			goto out;
 		dma_domain->target_dev = devid;

 		init_unity_mappings_for_device(dma_domain, devid);
+
 		spin_lock_irqsave(&iommu_pd_list_lock, flags);
 		list_add_tail(&dma_domain->list, &iommu_pd_list);
 		spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+	}

-		dev->archdata.dma_ops = &amd_iommu_dma_ops;
-
-		break;
-	case BUS_NOTIFY_REMOVED_DEVICE:
-
-		iommu_uninit_device(dev);
+	attach_device(dev, &dma_domain->domain);

-	default:
-		goto out;
-	}
+	dev->archdata.dma_ops = &amd_iommu_dma_ops;

+out:
 	iommu_completion_wait(iommu);

-out:
 	return 0;
 }

-static struct notifier_block device_nb = {
-	.notifier_call = device_change_notifier,
-};
-
-void amd_iommu_init_notifier(void)
+static void amd_iommu_remove_device(struct device *dev)
 {
-	bus_register_notifier(&pci_bus_type, &device_nb);
+	struct amd_iommu *iommu;
+	u16 devid;
+
+	if (!check_device(dev))
+		return;
+
+	devid = get_device_id(dev);
+	iommu = amd_iommu_rlookup_table[devid];
+
+	iommu_uninit_device(dev);
+	iommu_completion_wait(iommu);
 }

 /*****************************************************************************
@@ -3018,54 +2967,6 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
 	return check_device(dev);
 }

-/*
- * The function for pre-allocating protection domains.
- *
- * If the driver core informs the DMA layer if a driver grabs a device
- * we don't need to preallocate the protection domains anymore.
- * For now we have to.
- */
-static void __init prealloc_protection_domains(void)
-{
-	struct iommu_dev_data *dev_data;
-	struct dma_ops_domain *dma_dom;
-	struct pci_dev *dev = NULL;
-	u16 devid;
-
-	for_each_pci_dev(dev) {
-
-		/* Do we handle this device? */
-		if (!check_device(&dev->dev))
-			continue;
-
-		dev_data = get_dev_data(&dev->dev);
-		if (!amd_iommu_force_isolation && dev_data->iommu_v2) {
-			/* Make sure passthrough domain is allocated */
-			alloc_passthrough_domain();
-			dev_data->passthrough = true;
-			attach_device(&dev->dev, pt_domain);
-			pr_info("AMD-Vi: Using passthrough domain for device %s\n",
-				dev_name(&dev->dev));
-		}
-
-		/* Is there already any domain for it? */
-		if (domain_for_device(&dev->dev))
-			continue;
-
-		devid = get_device_id(&dev->dev);
-
-		dma_dom = dma_ops_domain_alloc();
-		if (!dma_dom)
-			continue;
-		init_unity_mappings_for_device(dma_dom, devid);
-		dma_dom->target_dev = devid;
-
-		attach_device(&dev->dev, &dma_dom->domain);
-
-		list_add_tail(&dma_dom->list, &iommu_pd_list);
-	}
-}
-
 static struct dma_map_ops amd_iommu_dma_ops = {
 	.alloc = alloc_coherent,
 	.free = free_coherent,
@@ -3131,11 +3032,6 @@ int __init amd_iommu_init_dma_ops(void)
 			goto free_domains;
 	}

-	/*
-	 * Pre-allocate the protection domains for each device.
-	 */
-	prealloc_protection_domains();
-
 	iommu_detected = 1;
 	swiotlb = 0;

@@ -3228,7 +3124,7 @@ static struct protection_domain *protection_domain_alloc(void)
 	return NULL;
 }

-static int __init alloc_passthrough_domain(void)
+static int alloc_passthrough_domain(void)
 {
 	if (pt_domain != NULL)
 		return 0;
@@ -3470,6 +3366,8 @@ static const struct iommu_ops amd_iommu_ops = {
 	.unmap = amd_iommu_unmap,
 	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = amd_iommu_iova_to_phys,
+	.add_device = amd_iommu_add_device,
+	.remove_device = amd_iommu_remove_device,
 	.get_dm_regions = amd_iommu_get_dm_regions,
 	.put_dm_regions = amd_iommu_put_dm_regions,
 	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
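
For comparison, the contract these two ops implement is small. A minimal skeleton with illustrative, generic names (not the AMD implementation above, which layers its dma_ops domain setup on top via init_iommu_group() and attach_device()) would be:

/*
 * Minimal, illustrative skeleton of the add_device/remove_device
 * contract of this era (hypothetical "demo" driver names):
 * ->add_device() must put the device into an iommu group and may
 * set up per-device DMA state; ->remove_device() tears it down.
 */
static int demo_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);	/* create or join a group */
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);			/* drop the lookup reference */
	return 0;
}

static void demo_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}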
drivers/iommu/amd_iommu_init.c  +9 −22
@@ -226,6 +226,7 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;

 static int amd_iommu_enable_interrupts(void);
 static int __init iommu_go_to_state(enum iommu_init_state state);
+static void init_device_table_dma(void);

 static inline void update_last_devid(u16 devid)
 {
@@ -1385,7 +1386,12 @@ static int __init amd_iommu_init_pci(void)
 			break;
 	}

-	ret = amd_iommu_init_devices();
+	init_device_table_dma();
+
+	for_each_iommu(iommu)
+		iommu_flush_all_caches(iommu);
+
+	amd_iommu_init_api();

 	print_iommu_info();

@@ -1825,8 +1831,6 @@ static bool __init check_ioapic_information(void)

 static void __init free_dma_resources(void)
 {
-	amd_iommu_uninit_devices();
-
 	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
 		   get_order(MAX_DOMAIN_ID/8));

@@ -2019,27 +2023,10 @@ static bool detect_ivrs(void)

 static int amd_iommu_init_dma(void)
 {
-	struct amd_iommu *iommu;
-	int ret;

 	if (iommu_pass_through)
-		ret = amd_iommu_init_passthrough();
+		return amd_iommu_init_passthrough();
-	else
-		ret = amd_iommu_init_dma_ops();
-
-	if (ret)
-		return ret;
-
-	init_device_table_dma();
-
-	for_each_iommu(iommu)
-		iommu_flush_all_caches(iommu);
-
-	amd_iommu_init_api();
-
-	amd_iommu_init_notifier();
-
-	return 0;
+	return amd_iommu_init_dma_ops();
 }

 /****************************************************************************
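
With the ops-based path in place, amd_iommu_init_api(), now called from amd_iommu_init_pci() above, is what kicks off per-device setup: publishing the ops makes the iommu core walk the existing PCI devices and invoke amd_iommu_add_device() for each, and hot-added devices take the same path through the core's bus notifier. That is why the bulk amd_iommu_init_devices() scan and the amd_iommu_init_notifier() registration can go away. Roughly, assuming the v4.2-era shape of the helper (its body is not part of this diff):

/*
 * Rough sketch of the registration step (assumed shape of
 * amd_iommu_init_api(); not part of this diff). bus_set_iommu()
 * stores the ops on the bus and replays ->add_device() for every
 * device already present on it.
 */
void __init amd_iommu_init_api(void)
{
	bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
}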