Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3bd22172 authored by Joerg Roedel's avatar Joerg Roedel
Browse files

amd-iommu: introduce for_each_iommu* macros



This patch introduces the for_each_iommu and for_each_iommu_safe macros
to simplify the developer's life when having to iterate over all AMD
IOMMUs in the system.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent 41fb454e
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -195,6 +195,14 @@
#define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
					      domain for an IOMMU */

/*
 * Make iterating over all IOMMUs easier
 *
 * Both macros walk the global amd_iommu_list.  Use for_each_iommu_safe()
 * when the current entry may be removed from the list during iteration
 * (it caches the following entry in 'next' first); use for_each_iommu()
 * for plain read-only traversal.
 */
#define for_each_iommu(iommu) \
	list_for_each_entry((iommu), &amd_iommu_list, list)
#define for_each_iommu_safe(iommu, next) \
	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)

/*
 * This structure contains generic data for  IOMMU protection domains
 * independent of their use.
+4 −4
Original line number Diff line number Diff line
@@ -213,7 +213,7 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	struct amd_iommu *iommu;

	list_for_each_entry(iommu, &amd_iommu_list, list)
	for_each_iommu(iommu)
		iommu_poll_events(iommu);

	return IRQ_HANDLED;
@@ -440,7 +440,7 @@ static void iommu_flush_domain(u16 domid)
	__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
				      domid, 1, 1);

	list_for_each_entry(iommu, &amd_iommu_list, list) {
	for_each_iommu(iommu) {
		spin_lock_irqsave(&iommu->lock, flags);
		__iommu_queue_command(iommu, &cmd);
		__iommu_completion_wait(iommu);
@@ -1672,7 +1672,7 @@ int __init amd_iommu_init_dma_ops(void)
	 * found in the system. Devices not assigned to any other
	 * protection domain will be assigned to the default one.
	 */
	list_for_each_entry(iommu, &amd_iommu_list, list) {
	for_each_iommu(iommu) {
		iommu->default_dom = dma_ops_domain_alloc(iommu, order);
		if (iommu->default_dom == NULL)
			return -ENOMEM;
@@ -1710,7 +1710,7 @@ int __init amd_iommu_init_dma_ops(void)

free_domains:

	list_for_each_entry(iommu, &amd_iommu_list, list) {
	for_each_iommu(iommu) {
		if (iommu->default_dom)
			dma_ops_domain_free(iommu->default_dom);
	}
+4 −4
Original line number Diff line number Diff line
@@ -679,7 +679,7 @@ static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	list_for_each_entry_safe(iommu, next, &amd_iommu_list, list) {
	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
@@ -779,7 +779,7 @@ static int __init iommu_setup_msix(struct amd_iommu *iommu)
	struct msix_entry entries[32]; /* only 32 supported by AMD IOMMU */
	int nvec = 0, i;

	list_for_each_entry(curr, &amd_iommu_list, list) {
	for_each_iommu(curr) {
		if (curr->dev == iommu->dev) {
			entries[nvec].entry = curr->evt_msi_num;
			entries[nvec].vector = 0;
@@ -818,7 +818,7 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu)
	int r;
	struct amd_iommu *curr;

	list_for_each_entry(curr, &amd_iommu_list, list) {
	for_each_iommu(curr) {
		if (curr->dev == iommu->dev)
			curr->int_enabled = true;
	}
@@ -971,7 +971,7 @@ static void __init enable_iommus(void)
{
	struct amd_iommu *iommu;

	list_for_each_entry(iommu, &amd_iommu_list, list) {
	for_each_iommu(iommu) {
		iommu_set_exclusion_range(iommu);
		iommu_init_msi(iommu);
		iommu_enable_event_logging(iommu);