
Commit 403b2f1f authored by Rohit Vaswani, committed by Matt Wagantall

Merge branch 'smmu_from_3.14' into branch 'msm-3.18'



Port missing SMMU features and fixes from msm-3.14 to msm-3.18.

* smmu_from_3.14 (49 commits):
  iommu: io-pgtable-arm: unmap correct addresses in error path
  iommu/arm-smmu: restrict virtual address range to 36 bits
  iommu: io-pgtable: use size_t for unmap return value
  iommu: io-pgtable-arm: set page tables as outer shareable
  ARM: dts: msm8996: Update implementation defined registers
  cnss: Set new attribute when attach to SMMU driver
  Revert "arm64: dma-mapping: avoid calling iommu_iova_to_phys"
  ipa: set IOMMU domain attributes before attaching
  ipa: set atomic IOMMU domain attr
  arm64: dma-mapping: Split large pages when allocating in atomic context
  arm64: dma_mapping: Add support for .dma_mapping_error() ops
  arm64: dma_mapping: Implement .dma_supported()
  iommu/arm-smmu: Implement .dma_supported()
  iommu: Add support for .dma_supported() ops
  iommu/arm-smmu: Add support for map/unmap to be atomic
  iommu: io-pgtable-arm: flush tlb for stale mappings
  iommu/arm-smmu: silently succeed the secure VMID domain attr
  iommu: add more debug for iommu_pgsize failure
  iommu: io-pgtable-arm: use correct mask during iova_to_phys
  iommu: io-pgtable-arm: make selftests more thorough
  dma-mapping: check for failed IOVA allocation
  iommu: io-pgtable-arm: add support for IOMMU_DEVICE
  iommu/arm-smmu: respect DOMAIN_ATTR_COHERENT_HTW_DISABLE
  iommu: io-pgtable-arm: unmap last level all at once
  iommu/arm-smmu: add missing cleanup to attach error path
  iommu: io-pgtable-arm: Flush all tlbs at end of unmap
  iommu/arm-smmu: wire up .map_sg from the ARM LPAE io-pgtable
  iommu: io-pgtable-arm: add self-test for .map_sg
  iommu: io-pgtable-arm: implement .map_sg
  iommu: io-pgtable: add .map_sg op to io_pgtable_ops
  iommu: export iommu_pgsize
  iommu/arm-smmu: enable clocks in tlb invalidation routines
  iommu/arm-smmu: re-use the init_mutex for protecting smmu_domain.smmu
  iommu/arm-smmu: implement DOMAIN_ATTR_PT_BASE_ADDR using ttbr[0]
  iommu/arm-smmu: save the pgtbl_cfg in the domain
  iommu: io-pgtable-arm: add non-secure quirk
  iommu/arm-smmu: make use of generic LPAE allocator
  iommu: add self-consistency tests to ARM LPAE IO page table allocator
  iommu: add ARM LPAE page table allocator
  iommu: introduce generic page table allocation framework
  Revert "iommu/arm-smmu: implement DOMAIN_ATTR_PT_BASE_ADDR"
  iommu/arm-smmu: add support for DOMAIN_ATTR_NESTING attribute
  iommu/arm-smmu: Play nice on non-ARM/SMMU systems
  iommu/arm-smmu: change IOMMU_EXEC to IOMMU_NOEXEC
  iommu/arm-smmu: Check of_match_node for NULL
  Revert "iommu/arm-smmu: Add support for page table donation"
  Revert "Add support of the IOMMU_DEVICE flag."
  Revert "iommu/arm-smmu: Make the arm_smmu_map operation atomic"
  Revert "iommu/arm-smmu: change IOMMU_EXEC to IOMMU_NOEXEC"

Change-Id: I2274db9d702c3f21c9ee9184265a9e9998a43629
Signed-off-by: Rohit Vaswani <rvaswani@codeaurora.org>
parents 76247c57 64e19d89
+1 −0
@@ -1562,6 +1562,7 @@ M: Will Deacon <will.deacon@arm.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	drivers/iommu/arm-smmu.c
+F:	drivers/iommu/io-pgtable-arm.c
 
 ARM64 PORT (AARCH64 ARCHITECTURE)
 M:	Catalin Marinas <catalin.marinas@arm.com>
+3 −2
@@ -95,7 +95,8 @@
 		<0x6b18 0x3>,
 		<0x6b24 0x204>,
 		<0x6b28 0x11000>,
-		<0x6b30 0x800>;
+		<0x6b30 0x800>,
+		<0x6b08 0x1000>;
 };
 
 &vfe_smmu {
+0 −1
@@ -17,7 +17,6 @@ struct dma_iommu_mapping {
 	size_t			bits;
 	unsigned int		order;
 	dma_addr_t		base;
-	phys_addr_t		phys;
 
 	spinlock_t		lock;
 	struct kref		kref;
+76 −21
@@ -60,10 +60,12 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
-static void *__alloc_from_pool(size_t size, struct page **ret_page)
+static void *__alloc_from_pool(size_t size, struct page **ret_pages)
 {
 	unsigned long val;
 	void *ptr = NULL;
+	int count = size >> PAGE_SHIFT;
+	int i;
 
 	if (!atomic_pool) {
 		WARN(1, "coherent pool not initialised!\n");
@@ -73,8 +75,10 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 	val = gen_pool_alloc(atomic_pool, size);
 	if (val) {
 		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
 
-		*ret_page = phys_to_page(phys);
+		for (i = 0; i < count ; i++) {
+			ret_pages[i] = phys_to_page(phys);
+			phys += 1 << PAGE_SHIFT;
+		}
 		ptr = (void *)val;
 	}

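The reworked __alloc_from_pool() above still hands back one virtually contiguous region from the atomic pool, but now reports it as one struct page * per PAGE_SIZE chunk. A minimal sketch of the new caller contract (the helper name and the GFP_ATOMIC choice are illustrative, not part of this patch):

	/* Hypothetical caller: supply one struct page * slot per page so
	 * later per-page handling (mapping, cache maintenance) does not
	 * have to assume physical contiguity. */
	static void *pool_alloc_example(size_t len, struct page ***out_pages)
	{
		size_t size = PAGE_ALIGN(len);
		int count = size >> PAGE_SHIFT;
		struct page **pages = kzalloc(count * sizeof(*pages), GFP_ATOMIC);
		void *vaddr;

		if (!pages)
			return NULL;

		vaddr = __alloc_from_pool(size, pages); /* fills pages[0..count-1] */
		if (!vaddr) {
			kfree(pages);
			return NULL;
		}

		*out_pages = pages;
		return vaddr;
	}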
@@ -203,11 +207,23 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
 	size = PAGE_ALIGN(size);
 
 	if (!(flags & __GFP_WAIT)) {
-		struct page *page = NULL;
-		void *addr = __alloc_from_pool(size, &page);
+		struct page **page = NULL;
+		int count = size >> PAGE_SHIFT;
+		int array_size = count * sizeof(struct page *);
+		void *addr;
+
+		if (array_size <= PAGE_SIZE)
+			page = kzalloc(array_size, flags);
+		else
+			page = vzalloc(array_size);
+
+		if (!page)
+			return NULL;
+
+		addr = __alloc_from_pool(size, page);
 
 		if (addr)
-			*dma_handle = phys_to_dma(dev, page_to_phys(page));
+			*dma_handle = phys_to_dma(dev, page_to_phys(*page));
 
 		return addr;

@@ -790,7 +806,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 
 		len = (j - i) << PAGE_SHIFT;
 		ret = iommu_map(mapping->domain, iova, phys, len,
-				IOMMU_READ|IOMMU_WRITE|IOMMU_NOEXEC);
+				IOMMU_READ|IOMMU_WRITE);
 		if (ret < 0)
 			goto fail;
 		iova += len;
@@ -847,24 +863,37 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 	return NULL;
 }
 
-static void *__iommu_alloc_atomic(struct device *dev, size_t size,
-				  dma_addr_t *handle)
+void *__iommu_alloc_atomic(struct device *dev, size_t size,
+				  dma_addr_t *handle, gfp_t gfp)
 {
-	struct page *page;
+	struct page **pages;
+	int count = size >> PAGE_SHIFT;
+	int array_size = count * sizeof(struct page *);
 	void *addr;
 
-	addr = __alloc_from_pool(size, &page);
-	if (!addr)
+	if (array_size <= PAGE_SIZE)
+		pages = kzalloc(array_size, gfp);
+	else
+		pages = vzalloc(array_size);
+
+	if (!pages)
 		return NULL;
 
-	*handle = __iommu_create_mapping(dev, &page, size);
+	addr = __alloc_from_pool(size, pages);
+	if (!addr)
+		goto err_free;
+
+	*handle = __iommu_create_mapping(dev, pages, size);
 	if (*handle == DMA_ERROR_CODE)
 		goto err_mapping;
 
+	kvfree(pages);
 	return addr;
 
 err_mapping:
 	__free_from_pool(addr, size);
+err_free:
+	kvfree(pages);
 	return NULL;
 }

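Note the allocator choice above: the page-pointer array is kzalloc()'d when it fits in a single page and vzalloc()'d otherwise, and kvfree() releases either kind. A hedged sketch of the idiom in isolation (the helper name is illustrative):

	/* Pick the cheaper allocator for a pointer array; kvfree() frees
	 * both kmalloc- and vmalloc-backed memory. Caveat: vzalloc() can
	 * sleep, so atomic callers effectively rely on the array fitting
	 * within one page. */
	static struct page **alloc_page_array(int count, gfp_t gfp)
	{
		size_t array_size = count * sizeof(struct page *);

		if (array_size <= PAGE_SIZE)
			return kzalloc(array_size, gfp);
		return vzalloc(array_size);
	}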
@@ -886,7 +915,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	size = PAGE_ALIGN(size);
 
 	if (!(gfp & __GFP_WAIT))
-		return __iommu_alloc_atomic(dev, size, handle);
+		return __iommu_alloc_atomic(dev, size, handle, gfp);
 
 	/*
 	 * Following is a work-around (a.k.a. hack) to prevent pages
@@ -1150,6 +1179,11 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
 		total_length += s->length;
 
 	iova = __alloc_iova(mapping, total_length);
+	if (iova == DMA_ERROR_CODE) {
+		dev_err(dev, "Couldn't allocate iova for sg %p\n", sg);
+		return 0;
+	}
+
 	ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot);
 	if (ret != total_length) {
 		__free_iova(mapping, iova, total_length);
@@ -1276,7 +1310,6 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	dma_addr_t dma_addr;
 	int ret, prot, len = PAGE_ALIGN(size + offset);
-	phys_addr_t phys = page_to_phys(page);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_ERROR_CODE)
@@ -1284,13 +1317,11 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
 
 	prot = __dma_direction_to_prot(dir);
 
-	ret = iommu_map(mapping->domain, dma_addr, phys, len,
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
 			prot);
 	if (ret < 0)
 		goto fail;
 
-	mapping->phys = phys;
-
 	return dma_addr + offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
@@ -1357,7 +1388,8 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page = phys_to_page(mapping->phys);
+	struct page *page = phys_to_page(iommu_iova_to_phys(
+						mapping->domain, iova));
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);
 
@@ -1376,7 +1408,8 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page = phys_to_page(mapping->phys);
+	struct page *page = phys_to_page(iommu_iova_to_phys(
+						mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
 
 	if (!iova)
@@ -1390,7 +1423,8 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page = phys_to_page(mapping->phys);
+	struct page *page = phys_to_page(iommu_iova_to_phys(
+						mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
 
 	if (!iova)
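These three hunks drop the cached mapping->phys shortcut in favour of asking the IOMMU driver for the translation (matching the revert of "arm64: dma-mapping: avoid calling iommu_iova_to_phys" in the series). An illustrative view of why the cache was unsafe (hypothetical driver code, not from this patch): one dma_iommu_mapping serves many concurrent mappings, so a single cached physical address only describes the most recent map, and unmap/sync on an older handle would touch the wrong page.

	dma_addr_t a = dma_map_page(dev, page_a, 0, PAGE_SIZE, DMA_TO_DEVICE);
	dma_addr_t b = dma_map_page(dev, page_b, 0, PAGE_SIZE, DMA_TO_DEVICE);

	/* With one cached mapping->phys, both unmaps would resolve to
	 * page_b; iommu_iova_to_phys() resolves each handle correctly. */
	dma_unmap_page(dev, a, PAGE_SIZE, DMA_TO_DEVICE);
	dma_unmap_page(dev, b, PAGE_SIZE, DMA_TO_DEVICE);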
@@ -1399,6 +1433,24 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
+static int arm_iommu_dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+	if (!mapping) {
+		dev_warn(dev, "No IOMMU mapping for device\n");
+		return 0;
+	}
+
+	return iommu_dma_supported(mapping->domain, dev, mask);
+}
+
+static int arm_iommu_mapping_error(struct device *dev,
+				   dma_addr_t dma_addr)
+{
+	return dma_addr == DMA_ERROR_CODE;
+}
+
 const struct dma_map_ops iommu_ops = {
 	.alloc		= arm_iommu_alloc_attrs,
 	.free		= arm_iommu_free_attrs,
@@ -1416,6 +1468,8 @@ const struct dma_map_ops iommu_ops = {
 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
 
 	.set_dma_mask		= arm_dma_set_mask,
+	.dma_supported		= arm_iommu_dma_supported,
+	.mapping_error		= arm_iommu_mapping_error,
 };
 
 const struct dma_map_ops iommu_coherent_ops = {
@@ -1431,6 +1485,7 @@ const struct dma_map_ops iommu_coherent_ops = {
 	.unmap_sg	= arm_coherent_iommu_unmap_sg,
 
 	.set_dma_mask	= arm_dma_set_mask,
+	.dma_supported   = arm_iommu_dma_supported,
 };
 
 /**
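With .dma_supported and .mapping_error wired into the ops tables, the generic DMA API can route those queries to the IOMMU layer. A hedged sketch of the driver-side view (the 36-bit mask is an illustrative value; drivers reach these ops only through the generic helpers):

	dma_addr_t dma;

	if (!dma_supported(dev, DMA_BIT_MASK(36)))	/* -> arm_iommu_dma_supported() */
		return -EIO;

	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))		/* -> arm_iommu_mapping_error() */
		return -ENOMEM;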
+1 −1
@@ -1182,7 +1182,7 @@ kgsl_iommu_map(struct kgsl_pagetable *pt,
 
 	addr = (unsigned int) memdesc->gpuaddr;
 
-	flags = IOMMU_READ | IOMMU_WRITE | IOMMU_NOEXEC;
+	flags = IOMMU_READ | IOMMU_WRITE;
 
 	/* Set up the protection for the page(s) */
 	if (memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY)
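Both this hunk and the __iommu_create_mapping() change drop IOMMU_NOEXEC from callers, consistent with the series' IOMMU_EXEC-to-IOMMU_NOEXEC transition: under the new semantics a mapping is executable unless the caller opts out explicitly. A sketch of composing the protection flags (the allow_exec policy bit is hypothetical):

	int prot = IOMMU_READ | IOMMU_WRITE;

	if (!allow_exec)
		prot |= IOMMU_NOEXEC;	/* forbid instruction fetches */

	ret = iommu_map(domain, iova, phys, size, prot);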