
Commit c256319f authored by Liam Mark, committed by Ashwini Muduganti

ion: Don't create a kernel mapping in begin_cpu_access



Currently, ION buffer kernel mappings are created during
begin_cpu_access and removed during end_cpu_access.

The begin_cpu_access and end_cpu_access calls don't need a kernel mapping,
and unnecessarily creating kernel mappings for large buffers can be
expensive because of the alloc_vmap_area call.

Change ION so that kernel mappings are created only when they are needed,
i.e. for calls such as dma_buf_kmap and dma_buf_vmap.
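
For illustration, a minimal client-side sketch of the resulting usage
pattern. client_fill_buffer is hypothetical and not part of this patch;
only the dma-buf calls are real. After this change,
begin_cpu_access/end_cpu_access perform cache maintenance only, so a
client that needs a CPU virtual address must bracket its access with
dma_buf_vmap/dma_buf_vunmap:

  #include <linux/dma-buf.h>
  #include <linux/err.h>
  #include <linux/string.h>

  /* Hypothetical client, for illustration only. */
  static int client_fill_buffer(struct dma_buf *dmabuf, u8 pattern)
  {
  	void *vaddr;
  	int ret;

  	/* Cache maintenance only; no kernel mapping is created here. */
  	ret = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
  	if (ret)
  		return ret;

  	/* The kernel mapping is created on demand (ion_buffer_kmap_get). */
  	vaddr = dma_buf_vmap(dmabuf);
  	if (IS_ERR_OR_NULL(vaddr)) {
  		ret = vaddr ? PTR_ERR(vaddr) : -ENOMEM;
  		goto end_access;
  	}

  	memset(vaddr, pattern, dmabuf->size);

  	/* Drops the kmap refcount; the buffer is unmapped at zero. */
  	dma_buf_vunmap(dmabuf, vaddr);
  end_access:
  	dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
  	return ret;
  }

dma_buf_kmap users get the same on-demand behaviour: the new
ion_dma_buf_kmap simply calls ion_dma_buf_vmap and offsets into the
returned address until per-page kmap is implemented properly (see the
TODO in the diff below).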

Change-Id: I60b06455bedab1a7bfab83eb6013475c8ae638b5
Signed-off-by: Liam Mark <lmark@codeaurora.org>
parent 50907dbd
+41 −44
@@ -183,8 +183,10 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 
 void ion_buffer_destroy(struct ion_buffer *buffer)
 {
-	if (WARN_ON_ONCE(buffer->kmap_cnt > 0))
+	if (buffer->kmap_cnt > 0) {
+		pr_warn_ratelimited("ION client likely missing a call to dma_buf_kunmap or dma_buf_vunmap\n");
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+	}
 	buffer->heap->ops->free(buffer);
 	kfree(buffer);
 }
@@ -229,7 +231,7 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
 {
 	if (buffer->kmap_cnt == 0) {
-		pr_warn_ratelimited("Call dma_buf_begin_cpu_access before dma_buf_end_cpu_access, pid:%d\n",
+		pr_warn_ratelimited("ION client likely missing a call to dma_buf_kmap or dma_buf_vmap, pid:%d\n",
 				    current->pid);
 		return;
 	}
@@ -508,29 +510,56 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
 	kfree(dmabuf->exp_name);
 }
 
-static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
+	void *vaddr = ERR_PTR(-EINVAL);
 
-	WARN(!buffer->vaddr, "Call dma_buf_begin_cpu_access before dma_buf_kmap\n");
-	return buffer->vaddr + offset * PAGE_SIZE;
-}
+	if (buffer->heap->ops->map_kernel) {
+		mutex_lock(&buffer->lock);
+		vaddr = ion_buffer_kmap_get(buffer);
+		mutex_unlock(&buffer->lock);
+	} else {
+		pr_warn_ratelimited("heap %s doesn't support map_kernel\n",
+				    buffer->heap->name);
+	}
 
-static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
-			       void *ptr)
-{
+	return vaddr;
 }
 
-static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
+static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
 
-	WARN(!buffer->vaddr, "Call dma_buf_begin_cpu_access before dma_buf_vmap\n");
-	return buffer->vaddr;
+	if (buffer->heap->ops->map_kernel) {
+		mutex_lock(&buffer->lock);
+		ion_buffer_kmap_put(buffer);
+		mutex_unlock(&buffer->lock);
+	}
 }
 
-static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
 {
+	/*
+	 * TODO: Once clients remove their hacks where they assume kmap(ed)
+	 * addresses are virtually contiguous implement this properly
+	 */
+	void *vaddr = ion_dma_buf_vmap(dmabuf);
+
+	if (IS_ERR(vaddr))
+		return vaddr;
+
+	return vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+			       void *ptr)
+{
+	/*
+	 * TODO: Once clients remove their hacks where they assume kmap(ed)
+	 * addresses are virtually contiguous implement this properly
+	 */
+	ion_dma_buf_vunmap(dmabuf, ptr);
 }
 
 static int ion_sgl_sync_range(struct device *dev, struct scatterlist *sgl,
@@ -616,7 +645,6 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 					  bool sync_only_mapped)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
-	void *vaddr;
 	struct ion_dma_buf_attachment *a;
 	int ret = 0;
 
@@ -629,15 +657,6 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 		goto out;
 	}
 
-	/*
-	 * TODO: Move this elsewhere because we don't always need a vaddr
-	 */
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		vaddr = ion_buffer_kmap_get(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
 	if (!(buffer->flags & ION_FLAG_CACHED)) {
 		trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false,
 						    true, direction,
@@ -732,12 +751,6 @@ static int __ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 		goto out;
 	}
 
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		ion_buffer_kmap_put(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
 	if (!(buffer->flags & ION_FLAG_CACHED)) {
 		trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false,
 						  true, direction,
@@ -840,7 +853,6 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
 						unsigned int len)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
-	void *vaddr;
 	struct ion_dma_buf_attachment *a;
 	int ret = 0;
 
@@ -853,15 +865,6 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
 		goto out;
 	}
 
-	/*
-	 * TODO: Move this elsewhere because we don't always need a vaddr
-	 */
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		vaddr = ion_buffer_kmap_get(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
 	if (!(buffer->flags & ION_FLAG_CACHED)) {
 		trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false,
 						    true, dir,
@@ -942,12 +945,6 @@ static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
 		goto out;
 	}
 
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		ion_buffer_kmap_put(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
 	if (!(buffer->flags & ION_FLAG_CACHED)) {
 		trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false,
 						  true, direction,