Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e93d470c authored by Divya Sharma
Browse files

Revert "ion: Ensure ION system secure heap shrinker doesn't deadlock"



This reverts commit 7118861c.

Change-Id: If748409bd899a4f847a2f8b8046d3297b739baff
Signed-off-by: Divya Sharma <divyash@codeaurora.org>
parent f7c1af66
Loading
Loading
Loading
Loading
+5 −44
Original line number Diff line number Diff line
@@ -248,17 +248,10 @@ static struct mem_prot_info *get_info_list_from_table(struct sg_table *table,
#define BATCH_MAX_SIZE SZ_2M
#define BATCH_MAX_SECTIONS 32

/*
 *  When -EAGAIN is returned it is safe for the caller to try to call
 *  __hyp_assign_table again.
 *
 *  When -EADDRNOTAVAIL is returned the memory may no longer be in
 *  a usable state and should no longer be accessed by the HLOS.
 */
static int __hyp_assign_table(struct sg_table *table,
int hyp_assign_table(struct sg_table *table,
			u32 *source_vm_list, int source_nelems,
			int *dest_vmids, int *dest_perms,
			int dest_nelems, bool try_lock)
			int dest_nelems)
{
	int ret = 0;
	struct scm_desc desc = {0};
@@ -288,17 +281,10 @@ static int __hyp_assign_table(struct sg_table *table,
					  &dest_vm_copy_size);
	if (!dest_vm_copy) {
		ret = -ENOMEM;
		goto out_free_src;
		goto out_free;
	}

	if (try_lock) {
		if (!mutex_trylock(&secure_buffer_mutex)) {
			ret = -EAGAIN;
			goto out_free_dest;
		}
	} else {
	mutex_lock(&secure_buffer_mutex);
	}

	sg_table_copy = get_info_list_from_table(table, &sg_table_copy_size);
	if (!sg_table_copy) {
@@ -354,12 +340,6 @@ static int __hyp_assign_table(struct sg_table *table,
		if (ret) {
			pr_info("%s: Failed to assign memory protection, ret = %d\n",
				__func__, ret);

			/*
			 * Make it clear to clients that the memory may no
			 * longer be in a usable state.
			 */
			ret = -EADDRNOTAVAIL;
			break;
		}
		batch_start = batch_end;
@@ -367,31 +347,12 @@ static int __hyp_assign_table(struct sg_table *table,

out_unlock:
	mutex_unlock(&secure_buffer_mutex);
out_free_dest:
	kfree(dest_vm_copy);
out_free_src:
out_free:
	kfree(source_vm_copy);
	return ret;
}

/*
 * Assign the memory described by @table to the destination VMs.
 * Blocking variant: forwards to __hyp_assign_table() with try_lock
 * disabled, so it waits on the secure buffer mutex rather than
 * failing with -EAGAIN.
 */
int hyp_assign_table(struct sg_table *table,
			u32 *source_vm_list, int source_nelems,
			int *dest_vmids, int *dest_perms,
			int dest_nelems)
{
	const bool try_lock = false;

	return __hyp_assign_table(table, source_vm_list, source_nelems,
				  dest_vmids, dest_perms, dest_nelems,
				  try_lock);
}

/*
 * Non-blocking variant of hyp_assign_table(): forwards to
 * __hyp_assign_table() with try_lock enabled, so the call returns
 * -EAGAIN instead of sleeping when the secure buffer mutex is
 * already held.  Safe to retry on -EAGAIN.
 */
int try_hyp_assign_table(struct sg_table *table,
			u32 *source_vm_list, int source_nelems,
			int *dest_vmids, int *dest_perms,
			int dest_nelems)
{
	const bool try_lock = true;

	return __hyp_assign_table(table, source_vm_list, source_nelems,
				  dest_vmids, dest_perms, dest_nelems,
				  try_lock);
}

int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vm_list,
			int source_nelems, int *dest_vmids,
			int *dest_perms, int dest_nelems)
+6 −12
Original line number Diff line number Diff line
@@ -99,8 +99,7 @@ static int populate_vm_list(unsigned long flags, unsigned int *vm_list,
}

int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
			int source_nelems, bool clear_page_private,
			bool try_lock)
			int source_nelems, bool clear_page_private)
{
	u32 dest_vmid = VMID_HLOS;
	u32 dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
@@ -114,14 +113,9 @@ int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
		goto out;
	}

	if (try_lock)
		ret = try_hyp_assign_table(sgt, source_vm_list, source_nelems,
					   &dest_vmid, &dest_perms, 1);
	else
	ret = hyp_assign_table(sgt, source_vm_list, source_nelems,
			       &dest_vmid, &dest_perms, 1);
	if (ret) {
		if (!try_lock)
		pr_err("%s: Unassign call failed.\n",
		       __func__);
		goto out;
@@ -199,7 +193,7 @@ int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
	}

	ret = ion_hyp_unassign_sg(sgt, source_vm_list, source_nelems,
				  set_page_private, false);
				  set_page_private);

out_free_source:
	kfree(source_vm_list);
+1 −2
Original line number Diff line number Diff line
@@ -20,8 +20,7 @@ bool is_secure_vmid_valid(int vmid);
int ion_hyp_assign_sg(struct sg_table *sgt, int *dest_vm_list,
		      int dest_nelems, bool set_page_private);
int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
			int source_nelems, bool clear_page_private,
			bool try_lock);
			int source_nelems, bool clear_page_private);
int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
				   bool set_page_private);
int ion_hyp_assign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
+2 −2
Original line number Diff line number Diff line
@@ -392,7 +392,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
	buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;

	if (vmid > 0)
		ion_hyp_unassign_sg(table, &vmid, 1, true, false);
		ion_hyp_unassign_sg(table, &vmid, 1, true);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
@@ -433,7 +433,7 @@ void ion_system_heap_free(struct ion_buffer *buffer)
		if (vmid < 0)
			ion_heap_buffer_zero(buffer);
	} else if (vmid > 0) {
		if (ion_hyp_unassign_sg(table, &vmid, 1, true, false))
		if (ion_hyp_unassign_sg(table, &vmid, 1, true))
			return;
	}

+2 −7
Original line number Diff line number Diff line
@@ -463,10 +463,7 @@ int ion_secure_page_pool_shrink(
		sg = sg_next(sg);
	}

	ret = ion_hyp_unassign_sg(&sgt, &vmid, 1, true, true);
	if (ret == -EADDRNOTAVAIL)
		goto out3;
	else if (ret < 0)
	if (ion_hyp_unassign_sg(&sgt, &vmid, 1, true))
		goto out2;

	list_for_each_entry_safe(page, tmp, &pages, lru) {
@@ -477,8 +474,6 @@ int ion_secure_page_pool_shrink(
	sg_free_table(&sgt);
	return freed;

out2:
	sg_free_table(&sgt);
out1:
	/* Restore pages to secure pool */
	list_for_each_entry_safe(page, tmp, &pages, lru) {
@@ -486,7 +481,7 @@ int ion_secure_page_pool_shrink(
		ion_page_pool_free(pool, page);
	}
	return 0;
out3:
out2:
	/*
	 * The security state of the pages is unknown after a failure;
	 * They can neither be added back to the secure pool nor buddy system.
Loading