Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9905cf67 authored by Liam Mark
Browse files

staging: android: ion: Add support to force DMA sync



Currently ION doesn't always DMA sync the memory it allocates and
it doesn't always do a DMA sync in response to a begin/end cpu access
call.

Add support in ION, via the CONFIG_ION_FORCE_DMA_SYNC config, to force a
DMA sync when ION memory is allocated and when calls to begin/end cpu
access are made.

This makes it possible to force ION's cache maintenance to behave like
it did on the previous version of ION.

Change-Id: Ifb2da086e46647697129c20206b24e249092d7f7
Signed-off-by: Liam Mark <lmark@codeaurora.org>
parent f397df63
Loading
Loading
Loading
Loading
+12 −0
Original line number Diff line number Diff line
@@ -43,3 +43,15 @@ config ION_CMA_HEAP
	  Choose this option to enable CMA heaps with Ion. This heap is backed
	  by the Contiguous Memory Allocator (CMA). If your system has these
	  regions, you should say Y here.

# Compatibility/debug option: restores the legacy ION behavior of DMA
# syncing buffers at allocation time and on every begin/end cpu access
# call, instead of relying on per-attachment cache maintenance.
config ION_FORCE_DMA_SYNC
	bool "Force ION to always DMA sync buffer memory"
	depends on ION
	help
	  Force ION to DMA sync buffer memory when it is allocated and to
	  always DMA sync the buffer memory on calls to begin/end cpu
	  access. This makes ION DMA sync behavior similar to that of the
	  older version of ION.
	  We generally don't want to enable this config as it breaks the
	  cache maintenance model.
	  If you're not sure say N here.
+72 −0
Original line number Diff line number Diff line
@@ -145,6 +145,24 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
	INIT_LIST_HEAD(&buffer->attachments);
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);

	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		int i;
		struct scatterlist *sg;

		/*
		 * this will set up dma addresses for the sglist -- it is not
		 * technically correct as per the dma api -- a specific
		 * device isn't really taking ownership here.  However, in
		 * practice on our systems the only dma_address space is
		 * physical addresses.
		 */
		for_each_sg(table->sgl, sg, table->nents, i) {
			sg_dma_address(sg) = sg_phys(sg);
			sg_dma_len(sg) = sg->length;
		}
	}

	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
@@ -538,6 +556,23 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
		goto out;

	mutex_lock(&buffer->lock);

	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		struct device *dev = buffer->heap->priv;
		struct sg_table *table = buffer->sg_table;

		if (sync_only_mapped)
			ion_sgl_sync_mapped(dev, table->sgl,
					    table->nents, &buffer->vmas,
					    direction, true);
		else
			dma_sync_sg_for_cpu(dev, table->sgl,
					    table->nents, direction);

		mutex_unlock(&buffer->lock);
		goto out;
	}

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->dma_mapped)
			continue;
@@ -579,6 +614,21 @@ static int __ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
		goto out;

	mutex_lock(&buffer->lock);
	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		struct device *dev = buffer->heap->priv;
		struct sg_table *table = buffer->sg_table;

		if (sync_only_mapped)
			ion_sgl_sync_mapped(dev, table->sgl,
					    table->nents, &buffer->vmas,
					    direction, false);
		else
			dma_sync_sg_for_device(dev, table->sgl,
					       table->nents, direction);
		mutex_unlock(&buffer->lock);
		goto out;
	}

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->dma_mapped)
			continue;
@@ -649,6 +699,17 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
		goto out;

	mutex_lock(&buffer->lock);
	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		struct device *dev = buffer->heap->priv;
		struct sg_table *table = buffer->sg_table;

		ion_sgl_sync_range(dev, table->sgl, table->nents,
				   offset, len, dir, true);

		mutex_unlock(&buffer->lock);
		goto out;
	}

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->dma_mapped)
			continue;
@@ -686,6 +747,17 @@ static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
		goto out;

	mutex_lock(&buffer->lock);
	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		struct device *dev = buffer->heap->priv;
		struct sg_table *table = buffer->sg_table;

		ion_sgl_sync_range(dev, table->sgl, table->nents,
				   offset, len, direction, false);

		mutex_unlock(&buffer->lock);
		goto out;
	}

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->dma_mapped)
			continue;
+4 −0
Original line number Diff line number Diff line
@@ -52,7 +52,11 @@
 * As default set to 'false' since ION allocations
 * are no longer required to be DMA ready
 */
/*
 * MAKE_ION_ALLOC_DMA_READY: 1 when CONFIG_ION_FORCE_DMA_SYNC is set,
 * 0 otherwise. When enabled, newly allocated ION buffers are made
 * DMA ready at allocation time (legacy ION behavior); see the
 * allocation-path sync in ion_buffer_create.
 */
#ifdef CONFIG_ION_FORCE_DMA_SYNC
#define MAKE_ION_ALLOC_DMA_READY 1
#else
#define MAKE_ION_ALLOC_DMA_READY 0
#endif

/**
 * struct ion_platform_heap - defines a heap in the given platform