
Commit 50bfbd11 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "msm: ADSPRPC: Use delayed unmap for SMMU mappings"

parents 579f3ae0 7c879e98
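
The merged change has two halves. In the ARM IOMMU DMA-mapping code, the mapping helpers (__iommu_create_mapping(), __iommu_alloc_atomic(), and the map_sg/map_page paths) gain a struct dma_attrs argument and now mark SMMU mappings IOMMU_NOEXEC unless the caller passes DMA_ATTR_EXEC_MAPPING. In the adsprpc (FastRPC) driver, fastrpc_mmap_create() and fastrpc_mmap_free() switch from dma_map_sg()/dma_unmap_sg() to msm_dma_map_sg_attrs()/msm_dma_unmap_sg(), passing the backing dma_buf (map->buf) so the msm_dma_iommu_mapping layer can track the mapping per buffer and defer the actual SMMU unmap (the "delayed unmap" of the commit title), and they request DMA_ATTR_EXEC_MAPPING for the mapping. Illustrative sketches of both halves follow the respective file diffs below.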
+19 −8
@@ -777,18 +777,23 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
 /*
  * Create a mapping in device IO address space for specified pages
  */
-static dma_addr_t
-__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+static dma_addr_t __iommu_create_mapping(struct device *dev,
+					struct page **pages, size_t size,
+					struct dma_attrs *attrs)
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	dma_addr_t dma_addr, iova;
 	int i, ret;
+	int prot = IOMMU_READ | IOMMU_WRITE;
 
 	dma_addr = __alloc_iova(mapping, size);
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
 
+	if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs))
+		prot |= IOMMU_NOEXEC;
+
 	iova = dma_addr;
 	for (i = 0; i < count; ) {
 		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
@@ -800,8 +805,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 				break;
 
 		len = (j - i) << PAGE_SHIFT;
-		ret = iommu_map(mapping->domain, iova, phys, len,
-				IOMMU_READ|IOMMU_WRITE|IOMMU_NOEXEC);
+		ret = iommu_map(mapping->domain, iova, phys, len, prot);
 		if (ret < 0)
 			goto fail;
 		iova += len;
@@ -859,7 +863,7 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 }
 
 void *__iommu_alloc_atomic(struct device *dev, size_t size,
-				  dma_addr_t *handle, gfp_t gfp)
+			dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
 	struct page **pages;
 	int count = size >> PAGE_SHIFT;
@@ -878,7 +882,7 @@ void *__iommu_alloc_atomic(struct device *dev, size_t size,
 	if (!addr)
 		goto err_free;
 
-	*handle = __iommu_create_mapping(dev, pages, size);
+	*handle = __iommu_create_mapping(dev, pages, size, attrs);
 	if (*handle == DMA_ERROR_CODE)
 		goto err_mapping;
 
@@ -910,7 +914,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	size = PAGE_ALIGN(size);
 
 	if (!(gfp & __GFP_WAIT))
-		return __iommu_alloc_atomic(dev, size, handle, gfp);
+		return __iommu_alloc_atomic(dev, size, handle, gfp, attrs);
 
 	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
@@ -925,7 +929,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	if (!pages)
 		return NULL;
 
-	*handle = __iommu_create_mapping(dev, pages, size);
+	*handle = __iommu_create_mapping(dev, pages, size, attrs);
 	if (*handle == DMA_ERROR_CODE)
 		goto err_buffer;
 
@@ -1067,6 +1071,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 					dir);
 
 		prot = __dma_direction_to_prot(dir);
+		if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs))
+			prot |= IOMMU_NOEXEC;
 
 		ret = iommu_map(mapping->domain, iova, phys, len, prot);
 		if (ret < 0)
@@ -1179,6 +1185,9 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
 		return 0;
 	}
 
+	if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs))
+		prot |= IOMMU_NOEXEC;
+
 	ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot);
 	if (ret != total_length) {
 		__free_iova(mapping, iova, total_length);
@@ -1311,6 +1320,8 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
 		return dma_addr;
 
 	prot = __dma_direction_to_prot(dir);
+	if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs))
+		prot |= IOMMU_NOEXEC;
 
 	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
 			prot);
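
All of the hunks above follow one pattern: build the IOMMU protection bits, then strip execute permission unless the caller asked for it via DMA_ATTR_EXEC_MAPPING. A minimal sketch of that decision, assuming only that IOMMU_NOEXEC and the MSM-specific DMA_ATTR_EXEC_MAPPING attribute are available (the helper name is illustrative, not from the patch):

#include <linux/dma-attrs.h>
#include <linux/iommu.h>

/*
 * Illustrative sketch only (example_iommu_prot() is not part of the patch):
 * SMMU mappings default to read/write, non-executable; a caller must set
 * DMA_ATTR_EXEC_MAPPING in its dma_attrs to get an executable mapping.
 */
static int example_iommu_prot(struct dma_attrs *attrs)
{
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs))
		prot |= IOMMU_NOEXEC;

	return prot;
}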
+8 −4
@@ -38,6 +38,7 @@
 #include <linux/iommu.h>
 #include <linux/kref.h>
 #include <linux/sort.h>
+#include <linux/msm_dma_iommu_mapping.h>
 #include <asm/dma-iommu.h>
 #include "adsprpc_compat.h"
 #include "adsprpc_shared.h"
@@ -308,8 +309,8 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map)
 	if (map->refs)
 		return;
 	if (map->size || map->phys)
-		dma_unmap_sg(fl->sctx->smmu.dev, map->table->sgl,
-				map->table->nents, DMA_BIDIRECTIONAL);
+		msm_dma_unmap_sg(fl->sctx->smmu.dev, map->table->sgl,
+				map->table->nents, DMA_BIDIRECTIONAL, map->buf);
 	if (!IS_ERR_OR_NULL(map->table))
 		dma_buf_unmap_attachment(map->attach, map->table,
 				DMA_BIDIRECTIONAL);
@@ -325,6 +326,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, uintptr_t va,
 {
 	struct fastrpc_session_ctx *sess = fl->sctx;
 	struct fastrpc_mmap *map = 0;
+	struct dma_attrs attrs;
 	int err = 0;
 	if (!fastrpc_mmap_find(fl, fd, va, len, ppmap))
 		return 0;
@@ -345,9 +347,11 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, uintptr_t va,
 				DMA_BIDIRECTIONAL)));
 	if (err)
 		goto bail;
-	VERIFY(err, map->table->nents == dma_map_sg(sess->smmu.dev,
+	init_dma_attrs(&attrs);
+	dma_set_attr(DMA_ATTR_EXEC_MAPPING, &attrs);
+	VERIFY(err, map->table->nents == msm_dma_map_sg_attrs(sess->smmu.dev,
 				map->table->sgl, map->table->nents,
-				DMA_BIDIRECTIONAL));
+				DMA_BIDIRECTIONAL, map->buf, &attrs));
 	if (err)
 		goto bail;
 	map->phys = sg_dma_address(map->table->sgl);
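
For completeness, this is roughly the caller-side pattern the adsprpc hunks adopt: the scatterlist is mapped with msm_dma_map_sg_attrs(), which also takes the backing dma_buf so the msm_dma_iommu_mapping layer can associate the SMMU mapping with that buffer and defer the real unmap, and the matching msm_dma_unmap_sg() is passed the same dma_buf at teardown. A minimal sketch with illustrative names (example_map_for_dsp()/example_unmap_for_dsp() and the error handling are not from the patch):

#include <linux/device.h>
#include <linux/dma-attrs.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <linux/scatterlist.h>

static int example_map_for_dsp(struct device *dev, struct dma_buf *buf,
			       struct sg_table *table)
{
	struct dma_attrs attrs;

	init_dma_attrs(&attrs);
	/* adsprpc requests an executable SMMU mapping */
	dma_set_attr(DMA_ATTR_EXEC_MAPPING, &attrs);

	/* returns the number of entries mapped, as checked in the diff above */
	if (msm_dma_map_sg_attrs(dev, table->sgl, table->nents,
				 DMA_BIDIRECTIONAL, buf, &attrs) != table->nents)
		return -ENOMEM;
	return 0;
}

static void example_unmap_for_dsp(struct device *dev, struct dma_buf *buf,
				  struct sg_table *table)
{
	/* the real SMMU unmap may be deferred until the buffer is released */
	msm_dma_unmap_sg(dev, table->sgl, table->nents,
			 DMA_BIDIRECTIONAL, buf);
}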