Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7e117411 authored by Patrick Daly
Browse files

ion: carveout: Add secure carveout heap type



A secure carveout heap manages memory that may never be accessed by HLOS.
Several pools of differing types of secure memory may be selected from
based on the flags argument to ion_alloc().

Change-Id: Ieb3d043ab7d5a0e88a32042f9ba24b3535a863ec
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
parent 9522112f
Loading
Loading
Loading
Loading
+31 −0
Original line number Diff line number Diff line
@@ -19,6 +19,7 @@ Required properties for Ion heaps
  the following:
    - "SYSTEM"
    - "CARVEOUT"
    - "SECURE_CARVEOUT"
    - "DMA"
    - "HYP_CMA"
    - "SYSTEM_SECURE"
@@ -28,6 +29,7 @@ Optional properties for Ion heaps

- memory-region: phandle to memory region associated with heap.


Example:
	qcom,ion {
                 compatible = "qcom,msm-ion";
@@ -57,3 +59,32 @@ Example:
                 };

        };

"SECURE_CARVEOUT"

This heap type is expected to contain multiple child nodes. Each child node
shall contain the following required properties:

- memory-region:
Refer to Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt

- token:
A u32 containing the set of secure domains which will be able to access the
memory-region.

Example:
qcom,ion {
	compatible = "qcom,msm-ion";
	#address-cells = <1>;
	#size-cells = <0>;

	qcom,ion-heap@14 {
		reg = <14>;
		qcom,ion-heap-type = "SECURE_CARVEOUT";

		node1 {
			memory-region = <&cp_region>;
			token = <ION_FLAG_CP_TOUCH>;
		};
	};
};
+5 −0
Original line number Diff line number Diff line
@@ -37,6 +37,7 @@
#define ION_SYSTEM_HEAP_NAME	"system"
#define ION_MM_HEAP_NAME	"mm"
#define ION_SPSS_HEAP_NAME	"spss"
#define ION_SECURE_CARVEOUT_HEAP_NAME	"secure_carveout"
#define ION_QSECOM_HEAP_NAME	"qsecom"
#define ION_QSECOM_TA_HEAP_NAME	"qsecom_ta"
#define ION_SECURE_HEAP_NAME	"secure_heap"
@@ -415,6 +416,10 @@ void ion_system_secure_heap_destroy(struct ion_heap *heap);
struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *heap);
void ion_cma_secure_heap_destroy(struct ion_heap *heap);

struct ion_heap *ion_secure_carveout_heap_create(
			struct ion_platform_heap *heap);
void ion_secure_carveout_heap_destroy(struct ion_heap *heap);

/**
 * functions for creating and destroying a heap pool -- allows you
 * to keep a pool of pre allocated memory to use from your heap.  Keeping
+199 −2
Original line number Diff line number Diff line
@@ -22,7 +22,12 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/msm_ion.h>
#include "ion.h"
#include "ion_secure_util.h"

#define ION_CARVEOUT_ALLOCATE_FAIL	-1

@@ -122,7 +127,9 @@ static struct ion_heap_ops carveout_heap_ops = {
	.unmap_kernel = ion_heap_unmap_kernel,
};

struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
static struct ion_heap *__ion_carveout_heap_create(
					struct ion_platform_heap *heap_data,
					bool sync)
{
	struct ion_carveout_heap *carveout_heap;
	int ret;
@@ -134,6 +141,7 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
	page = pfn_to_page(PFN_DOWN(heap_data->base));
	size = heap_data->size;

	if (sync)
		ion_pages_sync_for_device(dev, page, size, DMA_BIDIRECTIONAL);

	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
@@ -159,6 +167,11 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
	return &carveout_heap->heap;
}

/*
 * Create a regular (non-secure) carveout heap.  The backing pages are
 * synced for device access, since HLOS may touch this memory.
 */
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap;

	heap = __ion_carveout_heap_create(heap_data, true);

	return heap;
}

void ion_carveout_heap_destroy(struct ion_heap *heap)
{
	struct ion_carveout_heap *carveout_heap =
@@ -168,3 +181,187 @@ void ion_carveout_heap_destroy(struct ion_heap *heap)
	kfree(carveout_heap);
	carveout_heap = NULL;
}

/*
 * One secure-memory child pool of the secure carveout heap.
 * @list:  links this entry into ion_sc_heap.children
 * @heap:  underlying carveout heap backing this pool
 * @token: secure-domain flags (within ION_FLAGS_CP_MASK) that select
 *         this pool at allocation time
 */
struct ion_sc_entry {
	struct list_head list;
	struct ion_heap *heap;
	u32 token;
};

/*
 * Secure carveout heap manager.  Dispatches each allocation to one of
 * several child carveout pools based on the secure flags passed to
 * ion_alloc().
 * @heap:     embedded ion heap registered with the ion core
 * @dev:      device whose DT children describe the pools
 * @children: list of ion_sc_entry pools
 */
struct ion_sc_heap {
	struct ion_heap heap;
	struct device *dev;
	struct list_head children;
};

/*
 * Find the child pool whose token matches the CP bits of @flags.
 * Returns NULL when no pool was registered for that token.
 */
static struct ion_heap *ion_sc_find_child(struct ion_heap *heap, u32 flags)
{
	struct ion_sc_heap *manager =
		container_of(heap, struct ion_sc_heap, heap);
	struct ion_sc_entry *entry;
	u32 token = flags & ION_FLAGS_CP_MASK;

	list_for_each_entry(entry, &manager->children, list) {
		if (entry->token == token)
			return entry->heap;
	}

	return NULL;
}

/*
 * Allocate @len bytes from the child pool selected by the secure flags.
 * Returns 0 on success, -EINVAL if no pool matches the flags.
 */
static int ion_sc_heap_allocate(struct ion_heap *heap,
				struct ion_buffer *buffer, unsigned long len,
				unsigned long flags)
{
	struct ion_heap *pool;

	/* cache maintenance is not possible on secure memory */
	buffer->flags = flags & ~((unsigned long)ION_FLAG_CACHED);

	pool = ion_sc_find_child(heap, buffer->flags);
	if (!pool)
		return -EINVAL;

	return ion_carveout_heap_allocate(pool, buffer, len, buffer->flags);
}

/*
 * Return a buffer's pages to the child pool it was allocated from and
 * release its sg_table.  If the buffer's flags no longer match any
 * child pool the memory cannot be safely returned and is leaked (with
 * a warning) rather than handed back to the wrong pool.
 */
static void ion_sc_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *child;
	struct sg_table *table = buffer->sg_table;
	struct page *page = sg_page(table->sgl);
	phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

	child = ion_sc_find_child(buffer->heap, buffer->flags);
	if (!child) {
		/* Fixed misspelling and misplaced '\n' in the message */
		WARN(1, "ion_secure_carveout: invalid buffer flags on free. Memory will be leaked.\n");
		return;
	}

	ion_carveout_free(child, paddr, buffer->size);
	sg_free_table(table);
	kfree(table);
}

/*
 * Ops for the secure carveout manager.  Kernel/user mapping callbacks
 * are deliberately absent: this memory may never be accessed by HLOS.
 */
static struct ion_heap_ops ion_sc_heap_ops = {
	.allocate = ion_sc_heap_allocate,
	.free = ion_sc_heap_free,
};

/*
 * Read the "token" DT property from @np and hyp-assign the region
 * [base, base + size) to the secure VMIDs it encodes.  On success the
 * token is recorded in @entry so allocations can be matched to this
 * pool.  Returns 0 on success or a negative errno.
 */
static int ion_sc_get_dt_token(struct ion_sc_entry *entry,
			       struct device_node *np, u64 base, u64 size)
{
	u32 token;
	int ret;	/* dropped dead initializer; ret is always assigned below */

	if (of_property_read_u32(np, "token", &token))
		return -EINVAL;

	ret = ion_hyp_assign_from_flags(base, size, token);
	if (ret)
		pr_err("secure_carveout_heap: Assign token 0x%x failed\n",
		       token);
	else
		entry->token = token;

	return ret;
}

/*
 * Create one child pool from DT node @np: resolve its memory-region,
 * build a carveout heap over it, and hyp-assign it to the secure
 * domains named by its "token" property.  On success the pool is added
 * to @manager->children.  Returns 0 or a negative errno.
 *
 * Fixes a device_node refcount leak: of_parse_phandle() takes a
 * reference that must be dropped with of_node_put() on every path.
 * Also propagates the real error code from ion_sc_get_dt_token().
 */
static int ion_sc_add_child(struct ion_sc_heap *manager,
			    struct device_node *np)
{
	struct device *dev = manager->dev;
	struct ion_platform_heap heap_data = {0};
	struct ion_sc_entry *entry;
	struct device_node *phandle;
	const __be32 *basep;
	u64 base, size;
	int ret = -EINVAL;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->list);

	phandle = of_parse_phandle(np, "memory-region", 0);
	if (!phandle)
		goto out_free;

	basep = of_get_address(phandle, 0, &size, NULL);
	if (!basep)
		goto out_put;

	base = of_translate_address(phandle, basep);
	if (base == OF_BAD_ADDR)
		goto out_put;

	heap_data.priv = dev;
	heap_data.base = base;
	heap_data.size = size;

	/* This will zero memory initially */
	entry->heap = __ion_carveout_heap_create(&heap_data, false);
	if (IS_ERR(entry->heap))
		goto out_put;

	ret = ion_sc_get_dt_token(entry, np, base, size);
	if (ret)
		goto out_free_carveout;

	of_node_put(phandle);
	list_add(&entry->list, &manager->children);
	dev_info(dev, "ion_secure_carveout: creating heap@0x%llx, size 0x%llx\n",
		 base, size);
	return 0;

out_free_carveout:
	ion_carveout_heap_destroy(entry->heap);
out_put:
	of_node_put(phandle);
out_free:
	kfree(entry);
	return ret;
}

/*
 * Tear down a secure carveout manager: destroy every child carveout
 * pool, free each entry, then free the manager itself.
 */
void ion_secure_carveout_heap_destroy(struct ion_heap *heap)
{
	struct ion_sc_entry *entry, *next;
	struct ion_sc_heap *manager;

	manager = container_of(heap, struct ion_sc_heap, heap);

	list_for_each_entry_safe(entry, next, &manager->children, list) {
		ion_carveout_heap_destroy(entry->heap);
		kfree(entry);
	}

	kfree(manager);
}

/*
 * Create the secure carveout manager heap.  Each DT child node of the
 * heap's device describes one secure pool (memory-region + token); all
 * children must be created successfully or the whole heap fails.
 * Returns the new heap or an ERR_PTR.
 *
 * Fixes a device_node refcount leak: breaking out of
 * for_each_child_of_node() requires of_node_put() on the current node.
 * Also propagates the child's real error code instead of -EINVAL.
 */
struct ion_heap *ion_secure_carveout_heap_create(
			struct ion_platform_heap *heap_data)
{
	struct device *dev = heap_data->priv;
	int ret;
	struct ion_sc_heap *manager;
	struct device_node *np;

	manager = kzalloc(sizeof(*manager), GFP_KERNEL);
	if (!manager)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&manager->children);
	manager->dev = dev;

	for_each_child_of_node(dev->of_node, np) {
		ret = ion_sc_add_child(manager, np);
		if (ret) {
			dev_err(dev, "Creating child pool %s failed\n",
				np->name);
			/* drop the iterator's reference on early exit */
			of_node_put(np);
			goto err;
		}
	}

	manager->heap.ops = &ion_sc_heap_ops;
	manager->heap.type = (enum ion_heap_type)ION_HEAP_TYPE_SECURE_CARVEOUT;
	return &manager->heap;

err:
	ion_secure_carveout_heap_destroy(&manager->heap);
	return ERR_PTR(ret);
}
+6 −0
Original line number Diff line number Diff line
@@ -348,6 +348,9 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
	case (enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE:
		heap = ion_system_secure_heap_create(heap_data);
		break;
	case (enum ion_heap_type)ION_HEAP_TYPE_SECURE_CARVEOUT:
		heap = ion_secure_carveout_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
@@ -403,6 +406,9 @@ void ion_heap_destroy(struct ion_heap *heap)
	case (enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE:
		ion_system_secure_heap_destroy(heap);
		break;
	case (enum ion_heap_type)ION_HEAP_TYPE_SECURE_CARVEOUT:
		ion_secure_carveout_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
+41 −0
Original line number Diff line number Diff line
@@ -239,3 +239,44 @@ bool hlos_accessible_buffer(struct ion_buffer *buffer)

	return true;
}

/*
 * Reassign [base, base + size) from HLOS to the secure VMIDs encoded in
 * @flags (bits within ION_FLAGS_CP_MASK, one per VMID).  Secure display
 * is granted read-only access; every other VMID gets read-write.
 * Returns 0 on success or a negative errno.
 *
 * Fixes printk format mismatches: @flags is unsigned long and must be
 * printed with %lx, not %x.
 */
int ion_hyp_assign_from_flags(u64 base, u64 size, unsigned long flags)
{
	u32 *vmids, *modes;
	u32 nr, i;
	int ret = -EINVAL;
	u32 src_vm = VMID_HLOS;

	nr = count_set_bits(flags);
	vmids = kcalloc(nr, sizeof(*vmids), GFP_KERNEL);
	if (!vmids)
		return -ENOMEM;

	modes = kcalloc(nr, sizeof(*modes), GFP_KERNEL);
	if (!modes) {
		kfree(vmids);
		return -ENOMEM;
	}

	if ((flags & ~ION_FLAGS_CP_MASK) ||
	    populate_vm_list(flags, vmids, nr)) {
		pr_err("%s: Failed to parse secure flags 0x%lx\n", __func__,
		       flags);
		goto out;
	}

	for (i = 0; i < nr; i++) {
		/* secure display may only read the buffer */
		if (vmids[i] == VMID_CP_SEC_DISPLAY)
			modes[i] = PERM_READ;
		else
			modes[i] = PERM_READ | PERM_WRITE;
	}

	ret = hyp_assign_phys(base, size, &src_vm, 1, vmids, modes, nr);
	if (ret)
		pr_err("%s: Assign call failed, flags 0x%lx\n", __func__,
		       flags);

out:
	kfree(modes);
	kfree(vmids);
	return ret;
}
Loading