Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 09ee17eb authored by Joerg Roedel
Browse files

AMD IOMMU: fix possible race while accessing iommu->need_sync



The access to the iommu->need_sync member needs to be protected by the
iommu->lock. Otherwise there is a possible race condition. Fix it with
this patch.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent f91ba190
Loading
Loading
Loading
Loading
+13 −20
Original line number Original line Diff line number Diff line
@@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)


	spin_lock_irqsave(&iommu->lock, flags);
	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command(iommu, cmd);
	ret = __iommu_queue_command(iommu, cmd);
	if (!ret)
		iommu->need_sync = 1;
	spin_unlock_irqrestore(&iommu->lock, flags);
	spin_unlock_irqrestore(&iommu->lock, flags);


	return ret;
	return ret;
@@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);


	iommu->need_sync = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	spin_lock_irqsave(&iommu->lock, flags);


	if (!iommu->need_sync)
		goto out;

	iommu->need_sync = 0;

	ret = __iommu_queue_command(iommu, &cmd);
	ret = __iommu_queue_command(iommu, &cmd);


	if (ret)
	if (ret)
@@ -254,8 +259,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)


	ret = iommu_queue_command(iommu, &cmd);
	ret = iommu_queue_command(iommu, &cmd);


	iommu->need_sync = 1;

	return ret;
	return ret;
}
}


@@ -281,8 +284,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,


	ret = iommu_queue_command(iommu, &cmd);
	ret = iommu_queue_command(iommu, &cmd);


	iommu->need_sync = 1;

	return ret;
	return ret;
}
}


@@ -762,8 +763,6 @@ static void set_device_domain(struct amd_iommu *iommu,
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);


	iommu_queue_inv_dev_entry(iommu, devid);
	iommu_queue_inv_dev_entry(iommu, devid);

	iommu->need_sync = 1;
}
}


/*****************************************************************************
/*****************************************************************************
@@ -1034,7 +1033,6 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
	if (addr == bad_dma_address)
	if (addr == bad_dma_address)
		goto out;
		goto out;


	if (unlikely(iommu->need_sync))
	iommu_completion_wait(iommu);
	iommu_completion_wait(iommu);


out:
out:
@@ -1063,7 +1061,6 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,


	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
	__unmap_single(iommu, domain->priv, dma_addr, size, dir);


	if (unlikely(iommu->need_sync))
	iommu_completion_wait(iommu);
	iommu_completion_wait(iommu);


	spin_unlock_irqrestore(&domain->lock, flags);
	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1130,7 +1127,6 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
			goto unmap;
			goto unmap;
	}
	}


	if (unlikely(iommu->need_sync))
	iommu_completion_wait(iommu);
	iommu_completion_wait(iommu);


out:
out:
@@ -1176,7 +1172,6 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		s->dma_address = s->dma_length = 0;
		s->dma_address = s->dma_length = 0;
	}
	}


	if (unlikely(iommu->need_sync))
	iommu_completion_wait(iommu);
	iommu_completion_wait(iommu);


	spin_unlock_irqrestore(&domain->lock, flags);
	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1228,7 +1223,6 @@ static void *alloc_coherent(struct device *dev, size_t size,
		goto out;
		goto out;
	}
	}


	if (unlikely(iommu->need_sync))
	iommu_completion_wait(iommu);
	iommu_completion_wait(iommu);


out:
out:
@@ -1260,7 +1254,6 @@ static void free_coherent(struct device *dev, size_t size,


	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);


	if (unlikely(iommu->need_sync))
	iommu_completion_wait(iommu);
	iommu_completion_wait(iommu);


	spin_unlock_irqrestore(&domain->lock, flags);
	spin_unlock_irqrestore(&domain->lock, flags);