
Commit 253aacc2 authored by Isaac J. Manjarres

ion: msm: Add support for dynamically preventing dma-buf mappings



The current rules for determining whether a buffer can or cannot be mapped
to userspace depend on how the buffer was allocated. This is restrictive
in environments where the security state of a buffer can change
dynamically, since the accessibility of the buffer must change along
with its security state.

Thus, add support for tracking the number of userspace mappings associated
with an ION buffer, as well as an interface that allows drivers to lock
and unlock a buffer. In this context, locking a buffer means that the
buffer can no longer be mapped, provided that it does not have any
outstanding mappings at the time it is locked. Unlocking the buffer makes
it mappable again.

Change-Id: I6aa73b9ac7c301b12106ad3d3bcb4c2aac959e55
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
parent 5dbd5d63
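
To show how a client driver would consume the interface this commit adds, here is a minimal sketch built on the two exported calls in the diff below, msm_ion_dma_buf_lock() and msm_ion_dma_buf_unlock(). It is illustrative only: the linux/msm_ion.h include path and the my_assign_to_secure_vm() helper are assumptions, standing in for whatever mechanism actually moves the buffer between security states.

#include <linux/dma-buf.h>
#include <linux/msm_ion.h>	/* assumed header for the lock/unlock declarations */

/* Hypothetical placeholder for the driver's real security-state transition. */
static int my_assign_to_secure_vm(struct dma_buf *dmabuf)
{
	return 0;
}

static int my_driver_secure_buffer(struct dma_buf *dmabuf)
{
	int ret;

	/*
	 * Refuse new userspace mappings. Fails with -EBUSY while mappings
	 * are outstanding (vma_count != 0), and with -EINVAL if the buffer
	 * is already locked or is not an msm ION dma-buf.
	 */
	ret = msm_ion_dma_buf_lock(dmabuf);
	if (ret)
		return ret;

	/* Change the buffer's security state while it has no CPU mappings. */
	ret = my_assign_to_secure_vm(dmabuf);
	if (ret)
		/* Transition failed; make the buffer mappable again. */
		msm_ion_dma_buf_unlock(dmabuf);

	return ret;
}

While a buffer is locked, hlos_accessible_buffer() reports it as inaccessible, so the mmap(), vmap()/kmap(), and begin/end CPU-access paths changed below reject it until msm_ion_dma_buf_unlock() is called.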
+28 −2
@@ -74,6 +74,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
	phys_addr_t paddr;
	int ret;
	struct ion_carveout_heap *carveout_heap = to_carveout_heap(heap);
+	struct msm_ion_buf_lock_state *lock_state;
	struct device *dev = carveout_heap->heap.dev;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
@@ -83,10 +84,17 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
	if (ret)
		goto err_free;

+	lock_state = kzalloc(sizeof(*lock_state), GFP_KERNEL);
+	if (!lock_state) {
+		ret = -ENOMEM;
+		goto err_free_table;
+	}
+	buffer->priv_virt = lock_state;

	paddr = ion_carveout_allocate(heap, size);
	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
		ret = -ENOMEM;
-		goto err_free_table;
+		goto err_free_umap;
	}

	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
@@ -99,6 +107,8 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,

	return 0;

+err_free_umap:
+	kfree(lock_state);
err_free_table:
	sg_free_table(table);
err_free:
@@ -110,18 +120,26 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_carveout_heap *carveout_heap = to_carveout_heap(heap);
+	struct msm_ion_buf_lock_state *lock_state = buffer->priv_virt;
	struct sg_table *table = buffer->sg_table;
	struct page *page = sg_page(table->sgl);
	phys_addr_t paddr = page_to_phys(page);
	struct device *dev = carveout_heap->heap.dev;

+	mutex_lock(&buffer->lock);
	if (hlos_accessible_buffer(buffer))
		ion_buffer_zero(buffer);

+	if (lock_state && lock_state->locked)
+		pr_warn("%s: buffer is locked while being freed\n", __func__);
+	mutex_unlock(&buffer->lock);

	if (ion_buffer_cached(buffer))
		ion_pages_sync_for_device(dev, page, buffer->size,
					  DMA_BIDIRECTIONAL);

	ion_carveout_free(heap, paddr, buffer->size);
+	kfree(buffer->priv_virt);
	sg_free_table(table);
	kfree(table);
}
@@ -370,6 +388,7 @@ static void ion_sc_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *child;
	struct sg_table *table = buffer->sg_table;
+	struct msm_ion_buf_lock_state *lock_state = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);
	phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

@@ -379,9 +398,16 @@ static void ion_sc_heap_free(struct ion_buffer *buffer)
		return;
	}

+	mutex_lock(&buffer->lock);
	if (hlos_accessible_buffer(buffer))
		ion_buffer_zero(buffer);

+	if (lock_state && lock_state->locked)
+		pr_warn("%s: buffer is locked while being freed\n", __func__);
+	mutex_unlock(&buffer->lock);

	ion_carveout_free(child, paddr, buffer->size);
+	kfree(buffer->priv_virt);
	sg_free_table(table);
	kfree(table);
}
+8 −0
@@ -40,6 +40,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
{
	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
	struct sg_table *table;
+	struct msm_ion_buf_lock_state *lock_state;
	struct page *pages;
	unsigned long size = PAGE_ALIGN(len);
	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -96,6 +97,12 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
	sg_set_page(table->sgl, pages, size, 0);
	buffer->sg_table = table;

+	lock_state = kzalloc(sizeof(*lock_state), GFP_KERNEL);
+	if (!lock_state)
+		goto free_mem;
+	buffer->priv_virt = lock_state;

	ion_prepare_sgl_for_force_dma_sync(buffer->sg_table);
	return 0;

@@ -117,6 +124,7 @@ static void ion_cma_free(struct ion_buffer *buffer)
	/* release sg table */
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
+	kfree(buffer->priv_virt);
}

static struct ion_heap_ops ion_cma_ops = {
+4 −0
@@ -279,6 +279,8 @@ int ion_hyp_assign_sg_from_flags(struct sg_table *sgt, unsigned long flags,

bool hlos_accessible_buffer(struct ion_buffer *buffer)
{
+	struct msm_ion_buf_lock_state *lock_state = buffer->priv_virt;

	if ((buffer->flags & ION_FLAG_SECURE) &&
	    !(buffer->flags & ION_FLAG_CP_HLOS) &&
	    !(buffer->flags & ION_FLAG_CP_SPSS_HLOS_SHARED))
@@ -287,6 +289,8 @@ bool hlos_accessible_buffer(struct ion_buffer *buffer)
		 !(buffer->flags & ION_FLAG_CP_HLOS) &&
		 !(buffer->flags & ION_FLAG_CP_SPSS_HLOS_SHARED))
		return false;
+	else if (lock_state && lock_state->locked)
+		return false;

	return true;
}
+18 −1
@@ -269,6 +269,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = to_system_heap(heap);
+	struct msm_ion_buf_lock_state *lock_state;
	struct sg_table *table;
	struct sg_table table_sync = {0};
	struct scatterlist *sg;
@@ -382,6 +383,14 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
	buffer->sg_table = table;
	if (nents_sync)
		sg_free_table(&table_sync);

+	lock_state = kzalloc(sizeof(*lock_state), GFP_KERNEL);
+	if (!lock_state) {
+		ret = -ENOMEM;
+		goto err_free_sg2;
+	}
+	buffer->priv_virt = lock_state;

	ion_prepare_sgl_for_force_dma_sync(buffer->sg_table);
	return 0;

@@ -415,10 +424,11 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
	return ret;
}

-void ion_system_heap_free(struct ion_buffer *buffer)
+static void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = to_system_heap(heap);
+	struct msm_ion_buf_lock_state *lock_state = buffer->priv_virt;
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	int i;
@@ -426,8 +436,14 @@ void ion_system_heap_free(struct ion_buffer *buffer)

	if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) &&
	    !(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)) {
+		mutex_lock(&buffer->lock);
		if (hlos_accessible_buffer(buffer))
			ion_buffer_zero(buffer);

+		if (lock_state && lock_state->locked)
+			pr_warn("%s: buffer is locked while being freed\n",
+				__func__);
+		mutex_unlock(&buffer->lock);
	} else if (vmid > 0) {
		if (ion_hyp_unassign_sg(table, &vmid, 1, true))
			return;
@@ -438,6 +454,7 @@ void ion_system_heap_free(struct ion_buffer *buffer)
				 get_order(sg->length));
	sg_free_table(table);
	kfree(table);
+	kfree(buffer->priv_virt);
}

static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
+124 −28
@@ -160,6 +160,7 @@ static struct sg_table
	table = a->table;

	map_attrs = attachment->dma_map_attrs;
+	mutex_lock(&buffer->lock);
	if (!(buffer->flags & ION_FLAG_CACHED) ||
	    !hlos_accessible_buffer(buffer))
		map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
@@ -175,10 +176,10 @@ static struct sg_table
	    !(buffer->flags & ION_FLAG_CACHED)) {
		pr_warn_ratelimited("dev:%s Cannot DMA map uncached buffer as IO-coherent attrs:0x%lx\n",
				    dev_name(attachment->dev), map_attrs);
+		mutex_unlock(&buffer->lock);
		return ERR_PTR(-EINVAL);
	}

-	mutex_lock(&buffer->lock);
	if (map_attrs & DMA_ATTR_SKIP_CPU_SYNC)
		trace_ion_dma_map_cmo_skip(attachment->dev,
					   ino,
@@ -243,6 +244,7 @@ static void msm_ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
	struct ion_dma_buf_attachment *a = attachment->priv;
	unsigned long ino = file_inode(attachment->dmabuf->file)->i_ino;

+	mutex_lock(&buffer->lock);
	map_attrs = attachment->dma_map_attrs;
	if (!(buffer->flags & ION_FLAG_CACHED) ||
	    !hlos_accessible_buffer(buffer))
@@ -253,7 +255,6 @@ static void msm_ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
	    dev_is_dma_coherent_hint_cached(attachment->dev))
		map_attrs |= DMA_ATTR_FORCE_COHERENT;

-	mutex_lock(&buffer->lock);
	if (map_attrs & DMA_ATTR_SKIP_CPU_SYNC)
		trace_ion_dma_unmap_cmo_skip(attachment->dev,
					     ino,
@@ -297,31 +298,134 @@ void ion_pages_sync_for_device(struct device *dev, struct page *page,
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

+static void __msm_ion_vm_open(struct ion_buffer *buffer)
+{
+	struct msm_ion_buf_lock_state *lock_state = buffer->priv_virt;
+
+	lock_state->vma_count++;
+}
+
+static void msm_ion_vm_open(struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+
+	mutex_lock(&buffer->lock);
+	__msm_ion_vm_open(buffer);
+	mutex_unlock(&buffer->lock);
+}
+
+static void msm_ion_vm_close(struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+	struct msm_ion_buf_lock_state *lock_state = buffer->priv_virt;
+
+	mutex_lock(&buffer->lock);
+	lock_state->vma_count--;
+	mutex_unlock(&buffer->lock);
+}
+
+static const struct vm_operations_struct msm_ion_vma_ops = {
+	.open = msm_ion_vm_open,
+	.close = msm_ion_vm_close,
+};

static int msm_ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
+	struct msm_ion_buf_lock_state *lock_state = buffer->priv_virt;
	int ret = 0;

+	mutex_lock(&buffer->lock);
	if (!hlos_accessible_buffer(buffer)) {
		pr_err_ratelimited("%s: this buffer cannot be mapped to userspace\n",
				   __func__);
+		mutex_unlock(&buffer->lock);
		return -EINVAL;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

-	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = ion_heap_map_user(buffer->heap, buffer, vma);
-	mutex_unlock(&buffer->lock);

-	if (ret)
+	if (ret) {
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
+	} else if (lock_state) {
+		vma->vm_private_data = buffer;
+		vma->vm_ops = &msm_ion_vma_ops;
+		__msm_ion_vm_open(buffer);
+	}

+	mutex_unlock(&buffer->lock);
	return ret;
}

+static bool is_msm_ion_dma_buf(struct ion_buffer *buffer)
+{
+	return buffer->heap->buf_ops.attach == msm_ion_dma_buf_attach;
+}

+int msm_ion_dma_buf_lock(struct dma_buf *dmabuf)
+{
+	struct ion_buffer *buffer;
+	struct msm_ion_buf_lock_state *lock_state;
+	int ret;
+
+	if (!dmabuf)
+		return -EINVAL;
+
+	buffer = dmabuf->priv;
+	lock_state = buffer->priv_virt;
+
+	if ((!lock_state) || !is_msm_ion_dma_buf(buffer)) {
+		pr_err("%s: userspace map locking is not supported for this dma-buf\n",
+		       __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&buffer->lock);
+	if (lock_state->locked) {
+		ret = -EINVAL;
+		pr_err("%s: buffer is already locked\n", __func__);
+	} else if (lock_state->vma_count) {
+		ret = -EBUSY;
+	} else {
+		ret = 0;
+		lock_state->locked = true;
+	}
+	mutex_unlock(&buffer->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_ion_dma_buf_lock);

+void msm_ion_dma_buf_unlock(struct dma_buf *dmabuf)
+{
+	struct ion_buffer *buffer;
+	struct msm_ion_buf_lock_state *lock_state;
+
+	if (!dmabuf)
+		return;
+
+	buffer = dmabuf->priv;
+	lock_state = buffer->priv_virt;
+
+	if (!lock_state || !is_msm_ion_dma_buf(buffer)) {
+		pr_err("%s: userspace map unlocking is not supported for this dma-buf\n",
+		       __func__);
+		return;
+	}
+
+	mutex_lock(&buffer->lock);
+	if (!lock_state->locked)
+		pr_warn("%s: buffer is already unlocked\n", __func__);
+	else
+		lock_state->locked = false;
+	mutex_unlock(&buffer->lock);
+}
+EXPORT_SYMBOL(msm_ion_dma_buf_unlock);

static void msm_ion_dma_buf_release(struct dma_buf *dmabuf)
{
@@ -336,14 +440,13 @@ static void *msm_ion_dma_buf_vmap(struct dma_buf *dmabuf)
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr = ERR_PTR(-EINVAL);

-	if (hlos_accessible_buffer(buffer)) {
+	mutex_lock(&buffer->lock);
+	if (hlos_accessible_buffer(buffer))
		vaddr = msm_ion_buffer_kmap_get(buffer);
-		mutex_unlock(&buffer->lock);
-	} else {
+	else
		pr_warn_ratelimited("heap %s doesn't support map_kernel\n",
				    buffer->heap->name);
-	}
+	mutex_unlock(&buffer->lock);

	return vaddr;
}
@@ -352,12 +455,11 @@ static void msm_ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	struct ion_buffer *buffer = dmabuf->priv;

-	if (hlos_accessible_buffer(buffer)) {
+	mutex_lock(&buffer->lock);
+	if (hlos_accessible_buffer(buffer))
		msm_ion_buffer_kmap_put(buffer);
+	mutex_unlock(&buffer->lock);
-	}
}

static void *msm_ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
@@ -447,6 +549,7 @@ static int msm_ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
	unsigned long ino = file_inode(dmabuf->file)->i_ino;
	int ret = 0;

+	mutex_lock(&buffer->lock);
	if (!hlos_accessible_buffer(buffer)) {
		trace_ion_begin_cpu_access_cmo_skip(NULL, ino,
						    ion_buffer_cached(buffer),
@@ -461,7 +564,6 @@ static int msm_ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
		goto out;
	}

-	mutex_lock(&buffer->lock);

	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		struct device *dev = msm_ion_heap_device(buffer->heap);
@@ -471,8 +573,6 @@ static int msm_ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,

		trace_ion_begin_cpu_access_cmo_apply(dev, ino, true, true,
						     direction);

-		mutex_unlock(&buffer->lock);
		goto out;
	}

@@ -491,8 +591,8 @@ static int msm_ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
		trace_ion_begin_cpu_access_cmo_apply(a->dev, ino, true,
						     true, direction);
	}
-	mutex_unlock(&buffer->lock);
out:
+	mutex_unlock(&buffer->lock);
	return ret;
}

@@ -504,6 +604,7 @@ static int msm_ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
	unsigned long ino = file_inode(dmabuf->file)->i_ino;
	int ret = 0;

+	mutex_lock(&buffer->lock);
	if (!hlos_accessible_buffer(buffer)) {
		trace_ion_end_cpu_access_cmo_skip(NULL, ino,
						  ion_buffer_cached(buffer),
@@ -518,7 +619,6 @@ static int msm_ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
		goto out;
	}

-	mutex_lock(&buffer->lock);
	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		struct device *dev = msm_ion_heap_device(buffer->heap);
		struct sg_table *table = buffer->sg_table;
@@ -528,7 +628,6 @@ static int msm_ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,

		trace_ion_end_cpu_access_cmo_apply(dev, ino, true,
						   true, direction);
-		mutex_unlock(&buffer->lock);
		goto out;
	}

@@ -547,9 +646,9 @@ static int msm_ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
		trace_ion_end_cpu_access_cmo_apply(a->dev, ino, true,
						   true, direction);
	}
-	mutex_unlock(&buffer->lock);

out:
+	mutex_unlock(&buffer->lock);
	return ret;
}

@@ -563,6 +662,7 @@ static int msm_ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
	unsigned long ino = file_inode(dmabuf->file)->i_ino;
	int ret = 0;

+	mutex_lock(&buffer->lock);
	if (!hlos_accessible_buffer(buffer)) {
		trace_ion_begin_cpu_access_cmo_skip(NULL, ino,
						    ion_buffer_cached(buffer),
@@ -577,7 +677,6 @@ static int msm_ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
		goto out;
	}

-	mutex_lock(&buffer->lock);
	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		struct device *dev = msm_ion_heap_device(buffer->heap);
		struct sg_table *table = buffer->sg_table;
@@ -591,7 +690,6 @@ static int msm_ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
		else
			trace_ion_begin_cpu_access_cmo_skip(dev, ino,
							    true, true, dir);
-		mutex_unlock(&buffer->lock);
		goto out;
	}

@@ -618,9 +716,9 @@ static int msm_ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
			ret = tmp;
		}
	}
-	mutex_unlock(&buffer->lock);

out:
+	mutex_unlock(&buffer->lock);
	return ret;
}

@@ -635,6 +733,7 @@ static int msm_ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,

	int ret = 0;

+	mutex_lock(&buffer->lock);
	if (!hlos_accessible_buffer(buffer)) {
		trace_ion_end_cpu_access_cmo_skip(NULL, ino,
						  ion_buffer_cached(buffer),
@@ -649,7 +748,6 @@ static int msm_ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
		goto out;
	}

-	mutex_lock(&buffer->lock);
	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		struct device *dev = msm_ion_heap_device(buffer->heap);
		struct sg_table *table = buffer->sg_table;
@@ -665,8 +763,6 @@ static int msm_ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
			trace_ion_end_cpu_access_cmo_skip(dev, ino,
							  true, true,
							  direction);

-		mutex_unlock(&buffer->lock);
		goto out;
	}

@@ -695,9 +791,9 @@ static int msm_ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
			ret = tmp;
		}
	}
-	mutex_unlock(&buffer->lock);

out:
+	mutex_unlock(&buffer->lock);
	return ret;
}
