Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0f671ed8 authored by Patrick Daly's avatar Patrick Daly Committed by Saravana Kannan
Browse files

ANDROID: GKI: arm64: dma-mapping: add support for IOMMU mapper



On systems with IOMMUs, it's useful to handle IOMMU mappings in the
dma-mapping layer. This is currently supported on arm but not arm64. Add
support in arm64 by gratuitously lifting most of the IOMMU-related stuff
from dma-mapping.c in arm.

The original arm work was done by Marek Szyprowski in [4ce63fcd:
"ARM: dma-mapping: add support for IOMMU mapper"].

Change-Id: I1c3c8fe15049fe456751074398fd179ebd2ec64e
Signed-off-by: default avatarMitchel Humpherys <mitchelh@codeaurora.org>
Signed-off-by: default avatarPatrick Daly <pdaly@codeaurora.org>
Bug: 155522481
Signed-off-by: default avatarMark Salyzyn <salyzyn@google.com>
[saravanak snapshot from commit 79efc458af96 that approximately matches
commit f0dbb6af93e971a24e272267dc6d22676900873d]
Signed-off-by: default avatarSaravana Kannan <saravanak@google.com>
parent f7853bfd
Loading
Loading
Loading
Loading
+31 −0
Original line number Diff line number Diff line
@@ -860,6 +860,37 @@ config ARCH_HAS_CACHE_LINE_SIZE
config CC_HAVE_SHADOW_CALL_STACK
	def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18)

config ARM64_DMA_USE_IOMMU
	bool "ARM64 DMA iommu integration"
	select ARM_HAS_SG_CHAIN
	select NEED_SG_DMA_LENGTH
	help
	  Enable using iommu through the standard dma apis.
	  dma_alloc_coherent() will allocate scatter-gather memory
	  which is made virtually contiguous via iommu.
	  Enable if system contains IOMMU hardware.

if ARM64_DMA_USE_IOMMU

config ARM64_DMA_IOMMU_ALIGNMENT
	int "Maximum PAGE_SIZE order of alignment for DMA IOMMU buffers"
	range 4 9
	default 9
	help
	  DMA mapping framework by default aligns all buffers to the smallest
	  PAGE_SIZE order which is greater than or equal to the requested buffer
	  size. This works well for buffers up to a few hundred kilobytes, but
	  for larger buffers it is just a waste of address space. Drivers which
	  have a relatively small addressing window (like 64 MiB) might run out
	  of virtual space with just a few allocations.

	  With this parameter you can specify the maximum PAGE_SIZE order for
	  DMA IOMMU buffers. Larger buffers will be aligned only to this
	  specified order. The order is expressed as a power of two multiplied
	  by the PAGE_SIZE.

endif

config SECCOMP
	bool "Enable seccomp to safely compute untrusted bytecode"
	---help---
+9 −0
Original line number Diff line number Diff line
@@ -24,9 +24,18 @@ struct dev_archdata {
	const struct dma_map_ops *dev_dma_ops;
#endif
	bool dma_coherent;
#ifdef CONFIG_ARM64_DMA_USE_IOMMU
	struct dma_iommu_mapping       *mapping;
#endif
};

/* Platform-device specific arch data; currently empty on arm64. */
struct pdev_archdata {
};

/*
 * Fetch the dma_iommu_mapping stored in dev->archdata by the IOMMU DMA
 * layer; evaluates to NULL when CONFIG_ARM64_DMA_USE_IOMMU is disabled.
 */
#ifdef CONFIG_ARM64_DMA_USE_IOMMU
#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
#else
#define to_dma_iommu_mapping(dev) NULL
#endif

#endif
+35 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef ASMARM_DMA_IOMMU_H
#define ASMARM_DMA_IOMMU_H

#ifdef __KERNEL__

#include <linux/err.h>
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/kref.h>
#include <linux/dma-mapping-fast.h>

/*
 * Per-device state for routing the standard DMA API through an IOMMU.
 * NOTE(review): lifted from the 32-bit ARM implementation; the guard name
 * ASMARM_DMA_IOMMU_H is inherited from there.
 */
struct dma_iommu_mapping {
	/* iommu specific data */
	struct iommu_domain	*domain;	/* backing IOMMU domain */
	bool			init;		/* presumably set once initialised — not used in visible code */
	struct kref		kref;		/* refcount — presumably for sharing a mapping; confirm against users */
	const struct dma_map_ops *ops;		/* dma_map_ops chosen for this device (iommu_dma_ops or swiotlb) */

	/* Protects bitmap */
	spinlock_t		lock;
	void			*bitmap;	/* allocation bitmap guarded by @lock — users not visible here */
	size_t			bits;		/* size of the IOVA window in PAGE_SIZE units */
	dma_addr_t		base;		/* base address of the IOVA window */
};

/* Release the iommu-dma cookie held by @domain; no-op without IOMMU DMA. */
#ifdef CONFIG_ARM64_DMA_USE_IOMMU
void arm_iommu_put_dma_cookie(struct iommu_domain *domain);
#else  /* !CONFIG_ARM64_DMA_USE_IOMMU */
static inline void arm_iommu_put_dma_cookie(struct iommu_domain *domain) {}
#endif	/* CONFIG_ARM64_DMA_USE_IOMMU */

#endif /* __KERNEL__ */
#endif
+147 −0
Original line number Diff line number Diff line
@@ -26,13 +26,17 @@
#include <linux/genalloc.h>
#include <linux/dma-direct.h>
#include <linux/dma-contiguous.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/dma-removed.h>
#include <linux/pci.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma-iommu.h>

static int swiotlb __ro_after_init;

@@ -960,3 +964,146 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
#endif
}
EXPORT_SYMBOL_GPL(arch_setup_dma_ops);

#ifdef CONFIG_ARM64_DMA_USE_IOMMU

/* guards initialization of default_domain->iova_cookie */
static DEFINE_MUTEX(iommu_dma_init_mutex);

/*
 * iommu_init_mapping - attach an iommu-dma cookie to @mapping->domain and
 * initialise its IOVA domain for @dev.
 *
 * On success mapping->ops is pointed at the generic iommu_dma_ops.
 * Returns 0 on success or a negative errno.
 */
static int
iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping)
{
	struct iommu_domain *domain = mapping->domain;
	dma_addr_t dma_base = mapping->base;
	u64 size = mapping->bits << PAGE_SHIFT;	/* bits counts PAGE_SIZE units */
	int ret;
	bool own_cookie;

	/*
	 * if own_cookie is false, then we are sharing the iova_cookie with
	 * another driver, and should not free it on error. Cleanup will be
	 * done when the iommu_domain is freed.
	 */
	own_cookie = !domain->iova_cookie;

	if (own_cookie) {
		ret = iommu_get_dma_cookie(domain);
		if (ret) {
			dev_err(dev, "iommu_get_dma_cookie failed: %d\n", ret);
			return ret;
		}
	}

	ret = iommu_dma_init_domain(domain, dma_base, size, dev);
	if (ret) {
		dev_err(dev, "iommu_dma_init_domain failed: %d\n", ret);
		/* only undo the cookie we allocated ourselves above */
		if (own_cookie)
			iommu_put_dma_cookie(domain);
		return ret;
	}

	mapping->ops = &iommu_dma_ops;
	return 0;
}

/*
 * Select the dma_map_ops for @mapping under iommu_dma_init_mutex: a
 * stage-1 bypass domain keeps the plain swiotlb ops, anything else gets
 * a fully initialised iommu-dma mapping.
 *
 * Returns 0 on success or a negative errno from iommu_init_mapping().
 */
static int arm_iommu_get_dma_cookie(struct device *dev,
				    struct dma_iommu_mapping *mapping)
{
	int ret = 0;
	int s1_bypass = 0;

	mutex_lock(&iommu_dma_init_mutex);

	/* On failure s1_bypass stays 0 and we take the non-bypass path. */
	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
			      &s1_bypass);

	if (!s1_bypass)
		ret = iommu_init_mapping(dev, mapping);
	else
		mapping->ops = &arm64_swiotlb_dma_ops;

	mutex_unlock(&iommu_dma_init_mutex);
	return ret;
}

/*
 * Checks for "qcom,iommu-dma-addr-pool" property.
 * If not present, leaves dma_addr and dma_size unmodified.
 */
/*
 * Checks for "qcom,iommu-dma-addr-pool" property.
 * If not present, leaves dma_addr and dma_size unmodified.
 *
 * The property is looked up on the node referenced by "qcom,iommu-group"
 * when that phandle exists, otherwise on the device's own node.
 *
 * Fix: of_parse_phandle() returns a node with an elevated refcount which
 * the original never dropped; take a matching reference on the fallback
 * node and release it on every exit path.
 */
static void arm_iommu_get_dma_window(struct device *dev, u64 *dma_addr,
					u64 *dma_size)
{
	struct device_node *np;
	int naddr, nsize, len;
	const __be32 *ranges;

	if (!dev->of_node)
		return;

	np = of_parse_phandle(dev->of_node, "qcom,iommu-group", 0);
	if (!np)
		np = of_node_get(dev->of_node);	/* balance the put below */

	ranges = of_get_property(np, "qcom,iommu-dma-addr-pool", &len);
	if (!ranges)
		goto out_put;

	len /= sizeof(u32);
	naddr = of_n_addr_cells(np);
	nsize = of_n_size_cells(np);
	if (naddr == 0 || nsize == 0) {
		dev_err(dev, "Invalid #address-cells %d or #size-cells %d\n",
			naddr, nsize);
		goto out_put;
	}
	if (len < naddr + nsize) {
		dev_err(dev, "Invalid length for qcom,iommu-dma-addr-pool, expected %d cells\n",
			naddr + nsize);
		goto out_put;
	}

	*dma_addr = of_read_number(ranges, naddr);
	*dma_size = of_read_number(ranges + naddr, nsize);

out_put:
	of_node_put(np);
}

/*
 * Install IOMMU-aware dma_map_ops on @dev. Silently does nothing if the
 * device has no iommu_group or no attached domain; @dma_base/@size are
 * defaults that may be overridden by the DT "qcom,iommu-dma-addr-pool"
 * property.
 */
static void arm_iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain;
	struct iommu_group *group;
	/* stack-local: arm_iommu_get_dma_cookie() fills in mapping.ops */
	struct dma_iommu_mapping mapping = {0};

	group = dev->iommu_group;
	if (!group)
		return;

	arm_iommu_get_dma_window(dev, &dma_base, &size);

	domain = iommu_get_domain_for_dev(dev);
	if (!domain)
		return;

	/* Allow iommu-debug to call arch_setup_dma_ops to reconfigure itself */
	if (domain->type != IOMMU_DOMAIN_DMA &&
	    !of_device_is_compatible(dev->of_node, "iommu-debug-test")) {
		dev_err(dev, "Invalid iommu domain type!\n");
		return;
	}

	mapping.base = dma_base;
	mapping.bits = size >> PAGE_SHIFT;	/* window size in pages */
	mapping.domain = domain;

	if (arm_iommu_get_dma_cookie(dev, &mapping)) {
		dev_err(dev, "Failed to get dma cookie\n");
		return;
	}

	set_dma_ops(dev, mapping.ops);
}

#else /*!CONFIG_ARM64_DMA_USE_IOMMU */

/* No-op stub when the kernel is built without CONFIG_ARM64_DMA_USE_IOMMU. */
static void arm_iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
}
#endif
+1 −0
Original line number Diff line number Diff line
@@ -337,6 +337,7 @@ config ARM_SMMU
	select IOMMU_API
	select IOMMU_IO_PGTABLE_LPAE
	select ARM_DMA_USE_IOMMU if ARM
	select ARM64_DMA_USE_IOMMU if ARM64
	help
	  Support for implementations of the ARM System MMU architecture
	  versions 1 and 2.