Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 27b5c27b authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge changes I1432454a,Ifb2da086,I5cff7baa into msm-4.14

* changes:
  staging: android: ion: add ftrace logging for cache maintenance
  staging: android: ion: Add support to force DMA sync
  dma-buf: Make dma-buf anon file name unique
parents dbf5a41b d8506176
Loading
Loading
Loading
Loading
+19 −2
Original line number Diff line number Diff line
@@ -34,9 +34,13 @@
#include <linux/poll.h>
#include <linux/reservation.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/atomic.h>

#include <uapi/linux/dma-buf.h>

static atomic_long_t name_counter;

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
@@ -77,6 +81,7 @@ static int dma_buf_release(struct inode *inode, struct file *file)
		reservation_object_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
	return 0;
}
@@ -407,7 +412,9 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	struct reservation_object *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	char *bufname;
	int ret;
	long cnt;

	if (!exp_info->resv)
		alloc_size += sizeof(struct reservation_object);
@@ -429,10 +436,17 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	cnt = atomic_long_inc_return(&name_counter);
	bufname = kasprintf(GFP_KERNEL, "dmabuf%ld", cnt);
	if (!bufname) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
		goto err_name;
	}

	dmabuf->priv = exp_info->priv;
@@ -443,6 +457,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
	dmabuf->name = bufname;

	if (!resv) {
		resv = (struct reservation_object *)&dmabuf[1];
@@ -450,7 +465,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	}
	dmabuf->resv = resv;

	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
	file = anon_inode_getfile(bufname, &dma_buf_fops, dmabuf,
					exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
@@ -471,6 +486,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)

err_dmabuf:
	kfree(dmabuf);
err_name:
	kfree(bufname);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
+12 −0
Original line number Diff line number Diff line
@@ -43,3 +43,15 @@ config ION_CMA_HEAP
	  Choose this option to enable CMA heaps with Ion. This heap is backed
	  by the Contiguous Memory Allocator (CMA). If your system has these
	  regions, you should say Y here.

config ION_FORCE_DMA_SYNC
	bool "Force ION to always DMA sync buffer memory"
	depends on ION
	help
	  Force ION to DMA sync buffer memory when it is allocated and to
	  always DMA sync the buffer memory on calls to begin/end CPU
	  access. This makes ION's DMA sync behavior similar to that of the
	  older version of ION.
	  We generally don't want to enable this config as it breaks the
	  cache maintenance model.
	  If you're not sure, say N here.
+200 −8
Original line number Diff line number Diff line
@@ -40,6 +40,8 @@
#include <linux/sched/task.h>
#include <linux/bitops.h>
#include <linux/msm_dma_iommu_mapping.h>
#define CREATE_TRACE_POINTS
#include <trace/events/ion.h>
#include <soc/qcom/secure_buffer.h>

#include "ion.h"
@@ -145,6 +147,24 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
	INIT_LIST_HEAD(&buffer->attachments);
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);

	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		int i;
		struct scatterlist *sg;

		/*
		 * this will set up dma addresses for the sglist -- it is not
		 * technically correct as per the dma api -- a specific
		 * device isn't really taking ownership here.  However, in
		 * practice on our systems the only dma_address space is
		 * physical addresses.
		 */
		for_each_sg(table->sgl, sg, table->nents, i) {
			sg_dma_address(sg) = sg_phys(sg);
			sg_dma_len(sg) = sg->length;
		}
	}

	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
@@ -315,6 +335,21 @@ static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
	    !hlos_accessible_buffer(buffer))
		map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	if (map_attrs & DMA_ATTR_SKIP_CPU_SYNC)
		trace_ion_dma_map_cmo_skip(attachment->dev,
					   attachment->dmabuf->name,
					   ion_buffer_cached(buffer),
					   hlos_accessible_buffer(buffer),
					   attachment->dma_map_attrs,
					   direction);
	else
		trace_ion_dma_map_cmo_apply(attachment->dev,
					    attachment->dmabuf->name,
					    ion_buffer_cached(buffer),
					    hlos_accessible_buffer(buffer),
					    attachment->dma_map_attrs,
					    direction);

	if (map_attrs & DMA_ATTR_DELAYED_UNMAP) {
		count = msm_dma_map_sg_attrs(attachment->dev, table->sgl,
					     table->nents, direction,
@@ -345,6 +380,21 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
	    !hlos_accessible_buffer(buffer))
		map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	if (map_attrs & DMA_ATTR_SKIP_CPU_SYNC)
		trace_ion_dma_unmap_cmo_skip(attachment->dev,
					     attachment->dmabuf->name,
					     ion_buffer_cached(buffer),
					     hlos_accessible_buffer(buffer),
					     attachment->dma_map_attrs,
					     direction);
	else
		trace_ion_dma_unmap_cmo_apply(attachment->dev,
					      attachment->dmabuf->name,
					      ion_buffer_cached(buffer),
					      hlos_accessible_buffer(buffer),
					      attachment->dma_map_attrs,
					      direction);

	if (map_attrs & DMA_ATTR_DELAYED_UNMAP)
		msm_dma_unmap_sg_attrs(attachment->dev, table->sgl,
				       table->nents, direction,
@@ -528,6 +578,10 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
	int ret = 0;

	if (!hlos_accessible_buffer(buffer)) {
		trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name,
						    ion_buffer_cached(buffer),
						    false, direction,
						    sync_only_mapped);
		ret = -EPERM;
		goto out;
	}
@@ -541,13 +595,48 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
		mutex_unlock(&buffer->lock);
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
	if (!(buffer->flags & ION_FLAG_CACHED)) {
		trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false,
						    true, direction,
						    sync_only_mapped);
		goto out;
	}

	mutex_lock(&buffer->lock);

	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		struct device *dev = buffer->heap->priv;
		struct sg_table *table = buffer->sg_table;

		trace_ion_begin_cpu_access_cmo_apply(dev, dmabuf->name,
						     true, true, direction,
						     sync_only_mapped);

		if (sync_only_mapped)
			ion_sgl_sync_mapped(dev, table->sgl,
					    table->nents, &buffer->vmas,
					    direction, true);
		else
			dma_sync_sg_for_cpu(dev, table->sgl,
					    table->nents, direction);

		mutex_unlock(&buffer->lock);
		goto out;
	}

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->dma_mapped)
		if (!a->dma_mapped) {
			trace_ion_begin_cpu_access_notmapped(a->dev,
							     dmabuf->name,
							     true, true,
							     direction,
							     sync_only_mapped);
			continue;
		}

		trace_ion_begin_cpu_access_cmo_apply(a->dev, dmabuf->name,
						     true, true, direction,
						     sync_only_mapped);

		if (sync_only_mapped)
			ion_sgl_sync_mapped(a->dev, a->table->sgl,
@@ -572,6 +661,10 @@ static int __ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
	int ret = 0;

	if (!hlos_accessible_buffer(buffer)) {
		trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name,
						  ion_buffer_cached(buffer),
						  false, direction,
						  sync_only_mapped);
		ret = -EPERM;
		goto out;
	}
@@ -582,13 +675,46 @@ static int __ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
		mutex_unlock(&buffer->lock);
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
	if (!(buffer->flags & ION_FLAG_CACHED)) {
		trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false,
						  true, direction,
						  sync_only_mapped);
		goto out;
	}

	mutex_lock(&buffer->lock);
	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		struct device *dev = buffer->heap->priv;
		struct sg_table *table = buffer->sg_table;

		trace_ion_end_cpu_access_cmo_apply(dev, dmabuf->name,
						   true, true, direction,
						   sync_only_mapped);

		if (sync_only_mapped)
			ion_sgl_sync_mapped(dev, table->sgl,
					    table->nents, &buffer->vmas,
					    direction, false);
		else
			dma_sync_sg_for_device(dev, table->sgl,
					       table->nents, direction);
		mutex_unlock(&buffer->lock);
		goto out;
	}

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->dma_mapped)
		if (!a->dma_mapped) {
			trace_ion_end_cpu_access_notmapped(a->dev,
							   dmabuf->name,
							   true, true,
							   direction,
							   sync_only_mapped);
			continue;
		}

		trace_ion_end_cpu_access_cmo_apply(a->dev, dmabuf->name,
						   true, true, direction,
						   sync_only_mapped);

		if (sync_only_mapped)
			ion_sgl_sync_mapped(a->dev, a->table->sgl,
@@ -639,6 +765,10 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
	int ret = 0;

	if (!hlos_accessible_buffer(buffer)) {
		trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name,
						    ion_buffer_cached(buffer),
						    false, dir,
						    false);
		ret = -EPERM;
		goto out;
	}
@@ -652,13 +782,42 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
		mutex_unlock(&buffer->lock);
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
	if (!(buffer->flags & ION_FLAG_CACHED)) {
		trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false,
						    true, dir,
						    false);
		goto out;
	}

	mutex_lock(&buffer->lock);
	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		struct device *dev = buffer->heap->priv;
		struct sg_table *table = buffer->sg_table;

		trace_ion_begin_cpu_access_cmo_apply(dev, dmabuf->name,
						     true, true, dir,
						     false);

		ion_sgl_sync_range(dev, table->sgl, table->nents,
				   offset, len, dir, true);

		mutex_unlock(&buffer->lock);
		goto out;
	}

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->dma_mapped)
		if (!a->dma_mapped) {
			trace_ion_begin_cpu_access_notmapped(a->dev,
							     dmabuf->name,
							     true, true,
							     dir,
							     false);
			continue;
		}

		trace_ion_begin_cpu_access_cmo_apply(a->dev, dmabuf->name,
						     true, true, dir,
						     false);

		ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents,
				   offset, len, dir, true);
@@ -679,6 +838,10 @@ static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
	int ret = 0;

	if (!hlos_accessible_buffer(buffer)) {
		trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name,
						  ion_buffer_cached(buffer),
						  false, direction,
						  false);
		ret = -EPERM;
		goto out;
	}
@@ -689,13 +852,42 @@ static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
		mutex_unlock(&buffer->lock);
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
	if (!(buffer->flags & ION_FLAG_CACHED)) {
		trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false,
						  true, direction,
						  false);
		goto out;
	}

	mutex_lock(&buffer->lock);
	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		struct device *dev = buffer->heap->priv;
		struct sg_table *table = buffer->sg_table;

		trace_ion_end_cpu_access_cmo_apply(dev, dmabuf->name,
						   true, true, direction,
						   false);

		ion_sgl_sync_range(dev, table->sgl, table->nents,
				   offset, len, direction, false);

		mutex_unlock(&buffer->lock);
		goto out;
	}

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->dma_mapped)
		if (!a->dma_mapped) {
			trace_ion_end_cpu_access_notmapped(a->dev,
							   dmabuf->name,
							   true, true,
							   direction,
							   false);
			continue;
		}

		trace_ion_end_cpu_access_cmo_apply(a->dev, dmabuf->name,
						   true, true, direction,
						   false);

		ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents,
				   offset, len, direction, false);
+4 −0
Original line number Diff line number Diff line
@@ -52,7 +52,11 @@
 * Defaults to 'false' (unless CONFIG_ION_FORCE_DMA_SYNC is set),
 * since ION allocations are no longer required to be DMA ready
 */
#ifdef CONFIG_ION_FORCE_DMA_SYNC
#define MAKE_ION_ALLOC_DMA_READY 1
#else
#define MAKE_ION_ALLOC_DMA_READY 0
#endif

/**
 * struct ion_platform_heap - defines a heap in the given platform
+2 −0
Original line number Diff line number Diff line
@@ -382,6 +382,7 @@ struct dma_buf_ops {
 * @vmapping_counter: used internally to refcnt the vmaps
 * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
 * @exp_name: name of the exporter; useful for debugging.
 * @name: unique name for the buffer
 * @owner: pointer to exporter module; used for refcounting when exporter is a
 *         kernel module.
 * @list_node: node for dma_buf accounting and debugging.
@@ -409,6 +410,7 @@ struct dma_buf {
	unsigned vmapping_counter;
	void *vmap_ptr;
	const char *exp_name;
	char *name;
	struct module *owner;
	struct list_head list_node;
	void *priv;
Loading