Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cae595f4 authored by Vijayanand Jitta's avatar Vijayanand Jitta
Browse files

ion: Check for valid vmids for msm system heap



Add a check for valid VMIDs before doing an allocation
from the msm system heap. The check does a hyp_assign on a
page, caches the result, and marks each VMID as valid or
invalid based on the hyp_assign outcome. If the VMID is
invalid the allocation returns an error; if it is valid the
allocation proceeds. This prevents ION page leaks in cases
where hyp_assign fails. Also add a check for the
ion_hyp_assign_sg return value to prevent a leak.

Change-Id: Ibbc150490daf69d27b36db0e45a3c58496e6b9bd
Signed-off-by: default avatarVijayanand Jitta <vjitta@codeaurora.org>
parent 58329ca1
Loading
Loading
Loading
Loading
+64 −7
Original line number Original line Diff line number Diff line
@@ -34,6 +34,8 @@ static gfp_t low_order_gfp_flags = GFP_HIGHUSER | __GFP_ZERO;
static bool pool_auto_refill_en  __read_mostly =
static bool pool_auto_refill_en  __read_mostly =
IS_ENABLED(CONFIG_ION_POOL_AUTO_REFILL);
IS_ENABLED(CONFIG_ION_POOL_AUTO_REFILL);


static bool valid_vmids[VMID_LAST];

int order_to_index(unsigned int order)
int order_to_index(unsigned int order)
{
{
	int i;
	int i;
@@ -274,6 +276,42 @@ static void process_info(struct page_info *info,
	kfree(info);
	kfree(info);
}
}


static bool check_valid_vmid(int dest_vmid, struct ion_msm_system_heap *sys_heap)
{
	phys_addr_t addr;
	struct page *page;
	int ret;
	bool from_pool = true;
	u32 source_vmid = VMID_HLOS;
	u32 dest_perms = msm_secure_get_vmid_perms(dest_vmid);
	int order_ind = order_to_index(0);

	if (valid_vmids[dest_vmid])
		return true;

	page = ion_msm_page_pool_alloc(sys_heap->uncached_pools[order_ind],
				       &from_pool);
	if (!page)
		return false;

	if (!from_pool)
		ion_pages_sync_for_device(sys_heap->heap.dev,
					  page, PAGE_SIZE,
					  DMA_BIDIRECTIONAL);
	addr = page_to_phys(page);
	ret = hyp_assign_phys(addr, PAGE_SIZE, &source_vmid, 1,
			      &dest_vmid, &dest_perms, 1);
	if (ret) {
		ion_msm_page_pool_free(sys_heap->uncached_pools[order_ind],
				       page);
		return false;
	}
	valid_vmids[dest_vmid] = true;
	ion_msm_page_pool_free(sys_heap->secure_pools[dest_vmid][order_ind],
			       page);
	return true;
}

static int ion_msm_system_heap_allocate(struct ion_heap *heap,
static int ion_msm_system_heap_allocate(struct ion_heap *heap,
					struct ion_buffer *buffer,
					struct ion_buffer *buffer,
					unsigned long size,
					unsigned long size,
@@ -306,6 +344,19 @@ static int ion_msm_system_heap_allocate(struct ion_heap *heap,
		return -EINVAL;
		return -EINVAL;
	}
	}


	/*
	 * check if vmid is valid and skip this
	 * check for trusted vm vmids (i.e; for
	 * vmids > VMID_LAST) assuming vmids for
	 * trusted vm are already validated.
	 */
	if (vmid > 0 && vmid < VMID_LAST &&
	    !check_valid_vmid(vmid, sys_heap)) {
		pr_err("%s: VMID: %d not valid\n",
		       __func__, vmid);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&pages);
	INIT_LIST_HEAD(&pages);
	INIT_LIST_HEAD(&pages_from_pool);
	INIT_LIST_HEAD(&pages_from_pool);


@@ -392,8 +443,10 @@ static int ion_msm_system_heap_allocate(struct ion_heap *heap,
	if (nents_sync) {
	if (nents_sync) {
		if (vmid > 0) {
		if (vmid > 0) {
			ret = ion_hyp_assign_sg(&table_sync, &vmid, 1, true);
			ret = ion_hyp_assign_sg(&table_sync, &vmid, 1, true);
			if (ret)
			if (ret == -EADDRNOTAVAIL)
				goto err_free_sg2;
				goto err_free_sg2;
			else if (ret < 0)
				goto err_free;
		}
		}
	}
	}


@@ -412,16 +465,20 @@ static int ion_msm_system_heap_allocate(struct ion_heap *heap,
	return 0;
	return 0;


err_free_sg2:
err_free_sg2:
	/* We failed to zero buffers. Bypass pool */
	buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;

	if (vmid > 0)
	if (vmid > 0)
		if (ion_hyp_unassign_sg(table, &vmid, 1, true))
		if (ion_hyp_unassign_sg(&table_sync, &vmid, 1, true))
			goto err_free_table_sync;
			goto err_free_table_sync;

err_free:
	for_each_sg(table->sgl, sg, table->nents, i)
	for_each_sg(table->sgl, sg, table->nents, i) {
		if (!PagePrivate(sg_page(sg))) {
			/* Pages from buddy are not zeroed. Bypass pool */
			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
		} else {
			buffer->private_flags &= ~ION_PRIV_FLAG_SHRINKER_FREE;
		}
		free_buffer_page(sys_heap, buffer, sg_page(sg),
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg->length));
				 get_order(sg->length));
	}
err_free_table_sync:
err_free_table_sync:
	if (nents_sync)
	if (nents_sync)
		sg_free_table(&table_sync);
		sg_free_table(&table_sync);