Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4da70b9e authored by Joerg Roedel, committed by Ingo Molnar
Browse files

x86, AMD IOMMU: add dma_ops mapping functions for single mappings



This patch adds the dma_ops specific mapping functions for single mappings.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: iommu@lists.linux-foundation.org
Cc: bhavna.sarathy@amd.com
Cc: Sebastian.Biemueller@amd.com
Cc: robert.richter@amd.com
Cc: joro@8bytes.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent cb76c322
Loading
Loading
Loading
Loading
+59 −0
Original line number Original line Diff line number Diff line
@@ -40,6 +40,11 @@ struct command {
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e);
			     struct unity_map_entry *e);


/*
 * Tests the NPCACHE capability bit of @iommu.  A non-zero result
 * indicates the hardware may cache non-present page table entries,
 * so the IO/TLB has to be flushed after new mappings are installed.
 */
static int iommu_has_npcache(struct amd_iommu *iommu)
{
	int caches_np_entries = iommu->cap & IOMMU_CAP_NPCACHE;

	return caches_np_entries;
}

static int __iommu_queue_command(struct amd_iommu *iommu, struct command *cmd)
static int __iommu_queue_command(struct amd_iommu *iommu, struct command *cmd)
{
{
	u32 tail, head;
	u32 tail, head;
@@ -641,3 +646,57 @@ static void __unmap_single(struct amd_iommu *iommu,
	dma_ops_free_addresses(dma_dom, dma_addr, pages);
	dma_ops_free_addresses(dma_dom, dma_addr, pages);
}
}


/*
 * Maps a single physically contiguous region for DMA on behalf of
 * @dev.
 *
 * When the device has no IOMMU resources attached, the physical
 * address is handed back unchanged (direct mapping).  Otherwise the
 * region is mapped through the device's protection domain; on success
 * the IO/TLB range is flushed if the IOMMU caches non-present entries.
 * Returns bad_dma_address if the mapping could not be established.
 */
static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
			     size_t size, int dir)
{
	struct protection_domain *domain;
	struct amd_iommu *iommu;
	dma_addr_t addr;
	unsigned long flags;
	u16 devid;

	get_device_resources(dev, &iommu, &domain, &devid);

	/* Device not behind an IOMMU - use the address as-is */
	if (!iommu || !domain)
		return (dma_addr_t)paddr;

	spin_lock_irqsave(&domain->lock, flags);

	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir);
	if (addr != bad_dma_address) {
		/* Flush only required when non-present entries may be
		 * cached by the hardware */
		if (iommu_has_npcache(iommu))
			iommu_flush_pages(iommu, domain->id, addr, size);

		if (iommu->need_sync)
			iommu_completion_wait(iommu);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	return addr;
}

/*
 * Tears down a DMA mapping previously created with map_single().
 *
 * Devices without IOMMU resources were direct-mapped, so there is
 * nothing to undo for them.  For all others the mapping is removed
 * from the protection domain and the affected IO/TLB range flushed.
 */
static void unmap_single(struct device *dev, dma_addr_t dma_addr,
			 size_t size, int dir)
{
	struct protection_domain *domain;
	struct amd_iommu *iommu;
	unsigned long flags;
	u16 devid;

	/* Direct-mapped device - nothing to release */
	if (!get_device_resources(dev, &iommu, &domain, &devid))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(iommu, domain->priv, dma_addr, size, dir);

	iommu_flush_pages(iommu, domain->id, dma_addr, size);

	if (iommu->need_sync)
		iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
}