Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2568e9b2 authored by Laura Abbott
Browse files

gpu: ion: Pull secure buffers out of Ion



Securing individual buffers does not belong in the Ion framework.
Pull the code out of the framework and into a file which can be
managed separately. The file can be moved elsewhere at a later
date.

Change-Id: Ifaec740abcd78ee1bc3769a6c73014e4309c82be
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
parent 795c7f2e
Loading
Loading
Loading
Loading
+0 −74
Original line number Diff line number Diff line
@@ -235,19 +235,12 @@ err2:
	return ERR_PTR(ret);
}

/*
 * Invoke the heap's unsecure hook with force_unsecure set, but only
 * when the heap actually provides one.
 */
static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
	int (*unsecure)(struct ion_buffer *, int) =
		buffer->heap->ops->unsecure_buffer;

	if (unsecure)
		unsecure(buffer, 1);
}

static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_delayed_unsecure(buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->flags & ION_FLAG_CACHED)
		kfree(buffer->dirty);
@@ -1693,73 +1686,6 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
	up_write(&dev->lock);
}

/*
 * Secure the buffer behind @handle via its heap's secure_buffer hook.
 *
 * Returns 0 on success, -EINVAL for an invalid handle or a heap type
 * that does not allow per-handle securing, or the hook's error code.
 */
int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
			int version, void *data, int flags)
{
	struct ion_buffer *buffer;
	int rc = -EINVAL;

	/*
	 * Hold the client lock for the whole operation so the handle
	 * cannot be freed out from under us while we secure its buffer.
	 */
	mutex_lock(&client->lock);

	if (!ion_handle_validate(client, handle)) {
		WARN(1, "%s: invalid handle passed to secure.\n", __func__);
		goto done;
	}

	buffer = handle->buffer;

	if (!ion_heap_allow_handle_secure(buffer->heap->type)) {
		pr_err("%s: cannot secure buffer from non secure heap\n",
			__func__);
		goto done;
	}

	BUG_ON(!buffer->heap->ops->secure_buffer);

	rc = buffer->heap->ops->secure_buffer(buffer, version, data, flags);

done:
	mutex_unlock(&client->lock);
	return rc;
}

/*
 * Unsecure the buffer behind @handle via its heap's unsecure_buffer hook
 * (force_unsecure = 0).
 *
 * Returns 0 on success, -EINVAL for an invalid handle or a heap type
 * that does not allow per-handle securing, or the hook's error code.
 *
 * Fix: the WARN/pr_err strings were copy-pasted from ion_secure_handle()
 * and claimed the failure happened on the *secure* path, which misleads
 * anyone debugging an unsecure failure.  Messages now name this path.
 */
int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle)
{
	int ret = -EINVAL;
	struct ion_heap *heap;
	struct ion_buffer *buffer;

	/* Client lock protects the handle against a concurrent free. */
	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "%s: invalid handle passed to unsecure.\n", __func__);
		goto out_unlock;
	}

	buffer = handle->buffer;
	heap = buffer->heap;

	if (!ion_heap_allow_handle_secure(heap->type)) {
		pr_err("%s: cannot unsecure buffer from non secure heap\n",
			__func__);
		goto out_unlock;
	}

	BUG_ON(!buffer->heap->ops->unsecure_buffer);
	/*
	 * Protect the handle via the client lock to ensure we aren't
	 * racing with free
	 */
	ret = buffer->heap->ops->unsecure_buffer(buffer, 0);

out_unlock:
	mutex_unlock(&client->lock);
	return ret;
}

int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
			void *data)
{
+2 −20
Original line number Diff line number Diff line
@@ -34,10 +34,6 @@
#define ION_CMA_ALLOCATE_FAILED NULL

struct ion_secure_cma_buffer_info {
	/*
	 * This needs to come first for compatibility with the secure buffer API
	 */
	struct ion_cp_buffer secure;
	dma_addr_t phys;
	struct sg_table *table;
	bool is_cached;
@@ -460,8 +456,6 @@ static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate(
	ion_secure_cma_get_sgtable(sheap->dev,
			info->table, info->phys, len);

	info->secure.buffer = info->phys;

	/* keep this for memory release */
	buffer->priv_virt = info;
	dev_dbg(sheap->dev, "Allocate buffer %p\n", buffer);
@@ -498,17 +492,7 @@ static int ion_secure_cma_allocate(struct ion_heap *heap,
	if (buf) {
		int ret;

		buf->secure.want_delayed_unsecure = 0;
		atomic_set(&buf->secure.secure_cnt, 0);
		mutex_init(&buf->secure.lock);
		buf->secure.is_secure = 1;
		buf->secure.ignore_check = true;

		/*
		 * make sure the size is set before trying to secure
		 */
		buffer->size = len;
		ret = ion_cp_secure_buffer(buffer, ION_CP_V2, 0, 0);
		ret = msm_ion_secure_table(buf->table, 0, 0, true);
		if (ret) {
			/*
			 * Don't treat the secure buffer failing here as an
@@ -532,7 +516,7 @@ static void ion_secure_cma_free(struct ion_buffer *buffer)
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	dev_dbg(sheap->dev, "Release buffer %p\n", buffer);
	ion_cp_unsecure_buffer(buffer, 1);
	msm_ion_unsecure_table(info->table);
	atomic_sub(buffer->size, &sheap->total_allocated);
	BUG_ON(atomic_read(&sheap->total_allocated) < 0);
	/* release memory */
@@ -642,8 +626,6 @@ static struct ion_heap_ops ion_secure_cma_ops = {
	.map_kernel = ion_secure_cma_map_kernel,
	.unmap_kernel = ion_secure_cma_unmap_kernel,
	.print_debug = ion_secure_cma_print_debug,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};

struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data)
+62 −2
Original line number Diff line number Diff line
@@ -100,6 +100,19 @@ enum {
	HEAP_PROTECTED = 1,
};

/*  SCM related code for locking down memory for content protection */

#define SCM_CP_LOCK_CMD_ID	0x1
#define SCM_CP_PROTECT		0x1
#define SCM_CP_UNPROTECT	0x0

/*
 * Payload for the SCM_CP_LOCK_CMD_ID call into the secure environment
 * (sent via scm_call on SCM_SVC_MP).  The layout is a wire format shared
 * with secure-world code, hence the packed attribute — do not reorder or
 * widen fields.
 */
struct cp_lock_msg {
	unsigned int start;		/* physical start address of the region */
	unsigned int end;		/* physical end address (start + size) */
	unsigned int permission_type;	/* permission class passed through from caller */
	unsigned char lock;		/* SCM_CP_PROTECT or SCM_CP_UNPROTECT */
} __attribute__ ((__packed__));

#define DMA_ALLOC_TRIES	5

static int allocate_heap_memory(struct ion_heap *heap)
@@ -734,8 +747,6 @@ static struct ion_heap_ops cp_heap_ops = {
	.print_debug = ion_cp_print_debug,
	.secure_heap = ion_cp_secure_heap,
	.unsecure_heap = ion_cp_unsecure_heap,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};

struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
@@ -832,4 +843,53 @@ void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
	*size = cp_heap->total_size;
}

static int ion_cp_protect_mem_v1(unsigned int phy_base, unsigned int size,
			      unsigned int permission_type)
{
	struct cp_lock_msg cmd;
	cmd.start = phy_base;
	cmd.end = phy_base + size;
	cmd.permission_type = permission_type;
	cmd.lock = SCM_CP_PROTECT;

	return scm_call(SCM_SVC_MP, SCM_CP_LOCK_CMD_ID,
			&cmd, sizeof(cmd), NULL, 0);
}

static int ion_cp_unprotect_mem_v1(unsigned int phy_base, unsigned int size,
				unsigned int permission_type)
{
	struct cp_lock_msg cmd;
	cmd.start = phy_base;
	cmd.end = phy_base + size;
	cmd.permission_type = permission_type;
	cmd.lock = SCM_CP_UNPROTECT;

	return scm_call(SCM_SVC_MP, SCM_CP_LOCK_CMD_ID,
			&cmd, sizeof(cmd), NULL, 0);
}

/*
 * Version-dispatching front end for memory protection.  Only
 * ION_CP_V1 is implemented here; any other version is rejected
 * with -EINVAL.  @data is unused by the v1 path.
 */
int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
			      unsigned int permission_type, int version,
			      void *data)
{
	if (version != ION_CP_V1)
		return -EINVAL;

	return ion_cp_protect_mem_v1(phy_base, size, permission_type);
}

/*
 * Version-dispatching front end for memory unprotection.  Only
 * ION_CP_V1 is implemented here; any other version is rejected
 * with -EINVAL.  @data is unused by the v1 path.
 */
int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
			      unsigned int permission_type, int version,
			      void *data)
{
	if (version != ION_CP_V1)
		return -EINVAL;

	return ion_cp_unprotect_mem_v1(phy_base, size, permission_type);
}
+0 −3
Original line number Diff line number Diff line
@@ -118,9 +118,6 @@ struct ion_heap_ops {
			   const struct rb_root *mem_map);
	int (*secure_heap)(struct ion_heap *heap, int version, void *data);
	int (*unsecure_heap)(struct ion_heap *heap, int version, void *data);
	int (*secure_buffer)(struct ion_buffer *buffer, int version,
				void *data, int flags);
	int (*unsecure_buffer)(struct ion_buffer *buffer, int force_unsecure);
};

/**
+1 −1
Original line number Diff line number Diff line
obj-y += msm_ion.o ion_cp_common.o ion_iommu_map.o
obj-y += msm_ion.o ion_iommu_map.o secure_buffer.o
Loading