Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ac5f536e authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "ion: add support for secure dma allocations"

parents dc369afd 55c1de1e
Loading
Loading
Loading
Loading
+157 −0
Original line number Diff line number Diff line
@@ -25,12 +25,28 @@

/* Serializes all secure/unsecure operations (see msm_secure_table()). */
DEFINE_MUTEX(secure_buffer_mutex);

/*
 * Chunk-list descriptor handed to the secure environment.
 * Packed: this layout is part of the SCM call ABI, so no compiler-inserted
 * padding is permitted.
 */
struct cp2_mem_chunks {
	u32 chunk_list;		/* phys addr of a u32[] of chunk base addresses */
	u32 chunk_list_size;	/* number of entries in that array */
	u32 chunk_size;		/* size of each chunk, in bytes */
} __attribute__ ((__packed__));

/* Full request payload for the legacy (pre-ARMv8) MEM_PROTECT_LOCK_ID2 call. */
struct cp2_lock_req {
	struct cp2_mem_chunks chunks;
	u32 mem_usage;		/* always 0 (see secure_buffer_change_chunk()) */
	u32 lock;		/* non-zero = secure (lock), 0 = unsecure */
} __attribute__ ((__packed__));

/*
 * One contiguous physical range.
 * NOTE(review): presumably the element type for the MEM_PROT_ASSIGN_ID
 * call — the user is not visible in this hunk; confirm against full file.
 */
struct mem_prot_info {
	phys_addr_t addr;
	u64 size;
};

/* SCM command IDs (MEM_PROTECT_LOCK_ID2* are issued under SCM_SVC_MP below). */
#define MEM_PROT_ASSIGN_ID		0x16
#define MEM_PROTECT_LOCK_ID2		0x0A
#define MEM_PROTECT_LOCK_ID2_FLAT	0x11
/* Every sg entry passed to the secure world must be a multiple of this. */
#define V2_CHUNK_SIZE           SZ_1M
/* Content-protection feature ID, queried via scm_get_feat_version(). */
#define FEATURE_ID_CP 12

struct dest_vm_and_perm_info {
	u32 vm;
@@ -42,6 +58,134 @@ struct dest_vm_and_perm_info {
static void *qcom_secure_mem;
#define QCOM_SECURE_MEM_SIZE (512*1024)

/*
 * Ask the secure environment to lock (secure) or unlock a set of
 * physically-contiguous chunks.
 *
 * @chunks:     physical address of a u32 array of chunk base addresses
 * @nchunks:    number of entries in that array
 * @chunk_size: size of each chunk, in bytes
 * @lock:       non-zero to secure the chunks, zero to unsecure them
 *
 * Returns the SCM layer's result (0 on success).
 */
static int secure_buffer_change_chunk(u32 chunks,
				u32 nchunks,
				u32 chunk_size,
				int lock)
{
	struct cp2_lock_req request;
	u32 resp;
	int ret;
	struct scm_desc desc = {0};

	/* Fill the legacy request struct and the ARMv8 descriptor in lockstep. */
	desc.args[0] = request.chunks.chunk_list = chunks;
	desc.args[1] = request.chunks.chunk_list_size = nchunks;
	desc.args[2] = request.chunks.chunk_size = chunk_size;
	/* Usage is now always 0 */
	desc.args[3] = request.mem_usage = 0;
	desc.args[4] = request.lock = lock;
	desc.args[5] = 0;
	desc.arginfo = SCM_ARGS(6, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
				SCM_VAL);

	/*
	 * Tear down unused kernel mappings before handing the memory over;
	 * presumably so no stale virtual mappings to the pages remain while
	 * the secure world owns them — confirm against arch documentation.
	 */
	kmap_flush_unused();
	kmap_atomic_flush_unused();

	if (!is_scm_armv8()) {
		ret = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
				&request, sizeof(request), &resp, sizeof(resp));
	} else {
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				MEM_PROTECT_LOCK_ID2_FLAT), &desc);
		/* NOTE(review): resp is captured here but never examined. */
		resp = desc.ret[0];
	}

	return ret;
}

/*
 * Walk an sg_table and secure (lock) or unsecure each scatterlist entry by
 * describing it to the secure environment as a list of V2_CHUNK_SIZE chunks.
 *
 * @table: scatterlist of buffers; each entry's length must be a non-zero
 *         multiple of V2_CHUNK_SIZE, and its DMA address must fit in 32 bits
 *         (the chunk protocol carries u32 addresses — see WARN below).
 * @lock:  non-zero to secure, zero to unsecure
 *
 * On success each sg page's Private flag mirrors the secured state.  On an
 * SCM failure the loop deliberately continues with the remaining entries,
 * so a non-zero return may describe a partially-changed table; callers must
 * not assume all-or-nothing semantics.
 *
 * Returns 0 on success, -EINVAL/-ENOMEM or an SCM error otherwise.
 */
static int secure_buffer_change_table(struct sg_table *table, int lock)
{
	int i, j;
	int ret = -EINVAL;
	u32 *chunk_list;
	struct scatterlist *sg;

	for_each_sg(table->sgl, sg, table->nents, i) {
		int nchunks;
		int size = sg->length;
		int chunk_list_len;
		phys_addr_t chunk_list_phys;

		/*
		 * This should theoretically be a phys_addr_t but the protocol
		 * indicates this should be a u32.
		 */
		u32 base;
		u64 tmp = sg_dma_address(sg);

		WARN((tmp >> 32) & 0xffffffff,
			"%s: there are ones in the upper 32 bits of the sg at %p! They will be truncated! Address: 0x%llx\n",
			__func__, sg, tmp);
		if (unlikely(!size || (size % V2_CHUNK_SIZE))) {
			WARN(1,
				"%s: chunk %d has invalid size: 0x%x. Must be a multiple of 0x%x\n",
				__func__, i, size, V2_CHUNK_SIZE);
			return -EINVAL;
		}

		base = (u32)tmp;

		nchunks = size / V2_CHUNK_SIZE;
		chunk_list_len = sizeof(u32)*nchunks;

		chunk_list = kzalloc(chunk_list_len, GFP_KERNEL);

		if (!chunk_list)
			return -ENOMEM;

		chunk_list_phys = virt_to_phys(chunk_list);
		for (j = 0; j < nchunks; j++)
			chunk_list[j] = base + j * V2_CHUNK_SIZE;

		/*
		 * Flush the chunk list before sending the memory to the
		 * secure environment to ensure the data is actually present
		 * in RAM.
		 *
		 * BUGFIX: chunk_list is a u32 *, so the (exclusive) end
		 * pointer must be advanced by the element count, not by the
		 * byte length.  The previous "chunk_list + chunk_list_len"
		 * flushed 4x the intended range and could run past the end
		 * of the kzalloc'd buffer.
		 */
		dmac_flush_range(chunk_list, chunk_list + nchunks);

		ret = secure_buffer_change_chunk(chunk_list_phys,
				nchunks, V2_CHUNK_SIZE, lock);

		if (!ret) {
			/*
			 * Set or clear the private page flag to communicate the
			 * status of the chunk to other entities
			 */
			if (lock)
				SetPagePrivate(sg_page(sg));
			else
				ClearPagePrivate(sg_page(sg));
		}

		kfree(chunk_list);
	}

	return ret;
}

/*
 * Secure every buffer described by @table.  All (un)secure operations are
 * serialized through secure_buffer_mutex.  Returns 0 on success.
 */
int msm_secure_table(struct sg_table *table)
{
	int rc;

	mutex_lock(&secure_buffer_mutex);
	rc = secure_buffer_change_table(table, 1);
	mutex_unlock(&secure_buffer_mutex);

	return rc;
}

/*
 * Return every buffer described by @table to the non-secure world.
 * Serialized through secure_buffer_mutex.  Returns 0 on success.
 */
int msm_unsecure_table(struct sg_table *table)
{
	int rc;

	mutex_lock(&secure_buffer_mutex);
	rc = secure_buffer_change_table(table, 0);
	mutex_unlock(&secure_buffer_mutex);

	return rc;
}

static struct dest_vm_and_perm_info *
populate_dest_info(int *dest_vmids, int nelements, int *dest_perms,
		   size_t *size_in_bytes)
@@ -279,6 +423,19 @@ const char *msm_secure_vmid_to_string(int secure_vmid)
	}
}

/*
 * Pack a major.minor.patch triple into a single comparable integer:
 * major in bits 31:22, minor in 21:12, patch in 11:0.
 */
#define MAKE_CP_VERSION(major, minor, patch) \
	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))

/*
 * Report whether secure v2 (dynamic secure buffer allocation) is available:
 * it requires content-protection feature version >= 1.1.0.
 */
bool msm_secure_v2_is_supported(void)
{
	/*
	 * if the version is < 1.1.0 then dynamic buffer allocation is
	 * not supported
	 */
	return (scm_get_feat_version(FEATURE_ID_CP) >=
			MAKE_CP_VERSION(1, 1, 0));
}

static int __init alloc_secure_shared_memory(void)
{
	int ret = 0;
+4 −2
Original line number Diff line number Diff line
obj-$(CONFIG_ION) +=	ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
			ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o \
			ion_system_secure_heap.o
			ion_carveout_heap.o ion_chunk_heap.o ion_system_secure_heap.o
ifdef CONFIG_ION_MSM
obj-$(CONFIG_CMA) += ion_cma_heap.o ion_cma_secure_heap.o
endif
obj-$(CONFIG_ION_TEST) += ion_test.o
ifdef CONFIG_COMPAT
obj-$(CONFIG_ION) += compat_ion.o
+902 −0

File added.

Preview size limit exceeded, changes collapsed.

+1 −0
Original line number Diff line number Diff line
@@ -26,6 +26,7 @@
#include <linux/rbtree.h>
#include <linux/seq_file.h>

#include "msm_ion_priv.h"
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
+57 −0
Original line number Diff line number Diff line
@@ -404,6 +404,28 @@ static int msm_init_extra_data(struct device_node *node,
			ret = -ENOMEM;
		break;
	}
	case ION_HEAP_TYPE_SECURE_DMA:
	{
		unsigned int val;
		struct ion_cma_pdata *extra = NULL;

		ret = of_property_read_u32(node,
					   "qcom,default-prefetch-size", &val);
		if (!ret) {
			heap->extra_data = kzalloc(sizeof(*extra),
						   GFP_KERNEL);

			if (!heap->extra_data) {
				ret = -ENOMEM;
			} else {
				extra = heap->extra_data;
				extra->default_prefetch_size = val;
			}
		} else {
			ret = 0;
		}
		break;
	}
	default:
		heap->extra_data = 0;
		break;
@@ -423,6 +445,7 @@ static struct heap_types_info {
	MAKE_HEAP_TYPE_MAPPING(CARVEOUT),
	MAKE_HEAP_TYPE_MAPPING(CHUNK),
	MAKE_HEAP_TYPE_MAPPING(DMA),
	MAKE_HEAP_TYPE_MAPPING(SECURE_DMA),
	MAKE_HEAP_TYPE_MAPPING(SYSTEM_SECURE),
	MAKE_HEAP_TYPE_MAPPING(HYP_CMA),
};
@@ -609,6 +632,16 @@ int ion_heap_is_system_secure_heap_type(enum ion_heap_type type)
	return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE);
}

/* Only the secure-DMA (CMA) heap type may serve secure allocations. */
int ion_heap_allow_secure_allocation(enum ion_heap_type type)
{
	if (type == ((enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA))
		return 1;

	return 0;
}

/* Only handles from the secure-DMA (CMA) heap type may be secured. */
int ion_heap_allow_handle_secure(enum ion_heap_type type)
{
	if (type == ((enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA))
		return 1;

	return 0;
}

/*
 * Whole-heap securing is not supported for any heap type; buffers are
 * secured individually (see ion_heap_allow_secure_allocation()).
 *
 * Consistency fix: return 0 rather than bool `false` — the function and
 * its sibling predicates above all use int return values.
 */
int ion_heap_allow_heap_secure(enum ion_heap_type type)
{
	return 0;
}
@@ -795,6 +828,13 @@ long msm_ion_custom_ioctl(struct ion_client *client,
	{
		int ret;

		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
			ION_HEAP_TYPE_SECURE_DMA,
			(void *)data.prefetch_data.len,
			ion_secure_cma_prefetch);
		if (ret)
			return ret;

		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
				     ION_HEAP_TYPE_SYSTEM_SECURE,
				     (void *)&data.prefetch_data,
@@ -806,6 +846,13 @@ long msm_ion_custom_ioctl(struct ion_client *client,
	case ION_IOC_DRAIN:
	{
		int ret;
		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
				     ION_HEAP_TYPE_SECURE_DMA,
				     (void *)data.prefetch_data.len,
				     ion_secure_cma_drain_pool);

		if (ret)
			return ret;

		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
				     ION_HEAP_TYPE_SYSTEM_SECURE,
@@ -959,6 +1006,11 @@ static struct ion_heap *msm_ion_heap_create(struct ion_platform_heap *heap_data)
	struct ion_heap *heap = NULL;

	switch ((int)heap_data->type) {
#ifdef CONFIG_CMA
	case ION_HEAP_TYPE_SECURE_DMA:
		heap = ion_secure_cma_heap_create(heap_data);
		break;
#endif
	case ION_HEAP_TYPE_SYSTEM_SECURE:
		heap = ion_system_secure_heap_create(heap_data);
		break;
@@ -988,6 +1040,11 @@ static void msm_ion_heap_destroy(struct ion_heap *heap)
		return;

	switch ((int)heap->type) {
#ifdef CONFIG_CMA
	case ION_HEAP_TYPE_SECURE_DMA:
		ion_secure_cma_heap_destroy(heap);
		break;
#endif
	case ION_HEAP_TYPE_SYSTEM_SECURE:
		ion_system_secure_heap_destroy(heap);
		break;
Loading