
Commit 7118861c authored by Liam Mark, committed by Divya Sharma

ion: Ensure ION system secure heap shrinker doesn't deadlock



When the ION system secure heap shrinker runs, it attempts to free
secure ION page pool pages back to the system. It does this by first
HYP-assigning these pages back to the system via the hyp_assign_table call.

However, it is possible that the shrinker was invoked as a result of
somebody else calling hyp_assign_table. This is a problem because
hyp_assign_table holds the secure_buffer_mutex, so when the ION
system secure heap shrinker recursively calls into hyp_assign_table it
will attempt to take the secure_buffer_mutex a second time and the
system will deadlock.

Example of the deadlock call stack:

-001|__schedule()
-002|schedule()
-003|schedule_preempt_disabled()
-004|__mutex_lock_common()
-005|__mutex_lock()
-006|__mutex_lock_slowpath()
-007|__cmpxchg_acq(inline)
-007|__mutex_trylock_fast(inline)
-007|mutex_lock()
-008|get_info_list_from_table(inline)
-008|hyp_assign_table()
-009|ion_hyp_unassign_sg()
-010|sg_set_page(inline)
-010|ion_secure_page_pool_shrink()
-011|ion_system_heap_shrink()
-012|ion_system_secure_heap_shrink()
-013|ion_heap_shrink_scan()
-014|shrink_slab()
-015|shrink_node()
-016|do_try_to_free_pages()
-017|__read_once_size(inline)
-017|static_key_count(inline)
-017|static_key_false(inline)
-017|trace_mm_vmscan_direct_reclaim_end(inline)
-017|try_to_free_pages()
-018|memalloc_noreclaim_restore(inline)
-018|__perform_reclaim(inline)
-018|__alloc_pages_direct_reclaim(inline)
-018|__alloc_pages_slowpath(inline)
-018|__alloc_pages_nodemask()
-019|__alloc_pages(inline)
-019|__alloc_pages_node(inline)
-019|alloc_slab_page(inline)
-019|allocate_slab(inline)
-019|new_slab()
-020|___slab_alloc()
-021|__slab_alloc(inline)
-021|slab_alloc_node(inline)
-021|slab_alloc(inline)
-021|__kmalloc()
-022|allocate_extra_arg_buffer()
-023|__scm_call2()
-024|scm_call2()
-025|hyp_assign_table()
-026|kcalloc(inline)
-026|ion_hyp_assign_sg()
-027|ion_system_secure_heap_prefetch_work()
-028|__read_once_size(inline)
-028|static_key_count(inline)
-028|static_key_false(inline)
-028|trace_workqueue_execute_end(inline)
-028|process_one_work()
-029|__read_once_size(inline)
-029|list_empty(inline)
-029|worker_thread()
-030|kthread()
-031|ret_from_fork(asm)
---|end of frame
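
To make the recursion concrete, here is a minimal userspace sketch of the
pattern (plain C with pthreads; these are stand-in functions, not the kernel
implementation). A default pthread mutex, like a kernel mutex, is not
recursive, so the second lock attempt in the same call chain never returns.

/*
 * Illustrative sketch only; all names are stand-ins for the kernel paths
 * described above.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t secure_buffer_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for the shrinker path (ion_secure_page_pool_shrink). */
static void shrinker(void)
{
	pthread_mutex_lock(&secure_buffer_lock);	/* hangs if already held */
	puts("shrinker: returning pool pages");
	pthread_mutex_unlock(&secure_buffer_lock);
}

/* Stands in for hyp_assign_table(): does work with the lock held. */
static void hyp_assign(void)
{
	pthread_mutex_lock(&secure_buffer_lock);
	/* An allocation here can enter reclaim and run the shrinker... */
	shrinker();	/* ...which relocks secure_buffer_lock: deadlock */
	pthread_mutex_unlock(&secure_buffer_lock);
}

int main(void)
{
	hyp_assign();	/* never returns */
	return 0;
}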

To prevent this deadlock, ensure that the ION system secure heap
shrinker only attempts to acquire the secure_buffer_mutex via a
try-lock call.
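
As a rough illustration of the approach (again a userspace sketch with the
same stand-in names, not the actual patch), the shrinker-side path backs off
with -EAGAIN when the mutex is already held:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t secure_buffer_lock = PTHREAD_MUTEX_INITIALIZER;

/* Shrinker now only trylocks, so re-entry cannot block forever. */
static int shrinker(void)
{
	if (pthread_mutex_trylock(&secure_buffer_lock) != 0)
		return -EAGAIN;	/* lock already held: skip this pass */
	puts("shrinker: returning pool pages");
	pthread_mutex_unlock(&secure_buffer_lock);
	return 0;
}

static void hyp_assign(void)
{
	pthread_mutex_lock(&secure_buffer_lock);
	if (shrinker() == -EAGAIN)	/* backs off instead of deadlocking */
		puts("hyp_assign: shrinker skipped, pages stay pooled");
	pthread_mutex_unlock(&secure_buffer_lock);
}

int main(void)
{
	hyp_assign();
	return 0;
}

In the actual patch below, the back-off is implemented by a new
try_hyp_assign_table() wrapper, and on -EAGAIN the shrinker simply leaves the
pages in the secure pool for a later pass.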

Change-Id: I0e108b181ba36c57c382517d5916131f9c9b92eb
Signed-off-by: Liam Mark <lmark@codeaurora.org>
Signed-off-by: Divya Sharma <divyash@codeaurora.org>
parent 731db77a
+44 −5
@@ -248,10 +248,17 @@ static struct mem_prot_info *get_info_list_from_table(struct sg_table *table,
 #define BATCH_MAX_SIZE SZ_2M
 #define BATCH_MAX_SECTIONS 32
 
-int hyp_assign_table(struct sg_table *table,
+/*
+ *  When -EAGAIN is returned it is safe for the caller to try to call
+ *  __hyp_assign_table again.
+ *
+ *  When -EADDRNOTAVAIL is returned the memory may no longer be in
+ *  a usable state and should no longer be accessed by the HLOS.
+ */
+static int __hyp_assign_table(struct sg_table *table,
 			u32 *source_vm_list, int source_nelems,
 			int *dest_vmids, int *dest_perms,
-			int dest_nelems)
+			int dest_nelems, bool try_lock)
 {
 	int ret = 0;
 	struct scm_desc desc = {0};
@@ -281,10 +288,17 @@ int hyp_assign_table(struct sg_table *table,
 					  &dest_vm_copy_size);
 	if (!dest_vm_copy) {
 		ret = -ENOMEM;
-		goto out_free;
+		goto out_free_src;
 	}
 
-	mutex_lock(&secure_buffer_mutex);
+	if (try_lock) {
+		if (!mutex_trylock(&secure_buffer_mutex)) {
+			ret = -EAGAIN;
+			goto out_free_dest;
+		}
+	} else {
+		mutex_lock(&secure_buffer_mutex);
+	}
 
 	sg_table_copy = get_info_list_from_table(table, &sg_table_copy_size);
 	if (!sg_table_copy) {
@@ -340,6 +354,12 @@ int hyp_assign_table(struct sg_table *table,
 		if (ret) {
 			pr_info("%s: Failed to assign memory protection, ret = %d\n",
 				__func__, ret);
+
+			/*
+			 * Make it clear to clients that the memory may no
+			 * longer be in a usable state.
+			 */
+			ret = -EADDRNOTAVAIL;
 			break;
 		}
 		batch_start = batch_end;
@@ -347,12 +367,31 @@ int hyp_assign_table(struct sg_table *table,

 out_unlock:
 	mutex_unlock(&secure_buffer_mutex);
+out_free_dest:
 	kfree(dest_vm_copy);
-out_free:
+out_free_src:
 	kfree(source_vm_copy);
 	return ret;
 }
 
+int hyp_assign_table(struct sg_table *table,
+			u32 *source_vm_list, int source_nelems,
+			int *dest_vmids, int *dest_perms,
+			int dest_nelems)
+{
+	return __hyp_assign_table(table, source_vm_list, source_nelems,
+				  dest_vmids, dest_perms, dest_nelems, false);
+}
+
+int try_hyp_assign_table(struct sg_table *table,
+			u32 *source_vm_list, int source_nelems,
+			int *dest_vmids, int *dest_perms,
+			int dest_nelems)
+{
+	return __hyp_assign_table(table, source_vm_list, source_nelems,
+				  dest_vmids, dest_perms, dest_nelems, true);
+}
+
 int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vm_list,
 			int source_nelems, int *dest_vmids,
 			int *dest_perms, int dest_nelems)
+12 −6
@@ -99,7 +99,8 @@ static int populate_vm_list(unsigned long flags, unsigned int *vm_list,
 }
 
 int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
-			int source_nelems, bool clear_page_private)
+			int source_nelems, bool clear_page_private,
+			bool try_lock)
 {
 	u32 dest_vmid = VMID_HLOS;
 	u32 dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
@@ -113,9 +114,14 @@ int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
 		goto out;
 	}
 
-	ret = hyp_assign_table(sgt, source_vm_list, source_nelems,
-			       &dest_vmid, &dest_perms, 1);
+	if (try_lock)
+		ret = try_hyp_assign_table(sgt, source_vm_list, source_nelems,
+					   &dest_vmid, &dest_perms, 1);
+	else
+		ret = hyp_assign_table(sgt, source_vm_list, source_nelems,
+				       &dest_vmid, &dest_perms, 1);
 	if (ret) {
-		pr_err("%s: Unassign call failed.\n",
-		       __func__);
+		if (!try_lock)
+			pr_err("%s: Unassign call failed.\n",
+			       __func__);
 		goto out;
@@ -193,7 +199,7 @@ int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
 	}
 
 	ret = ion_hyp_unassign_sg(sgt, source_vm_list, source_nelems,
-				  set_page_private);
+				  set_page_private, false);
 
 out_free_source:
 	kfree(source_vm_list);
+2 −1
@@ -20,7 +20,8 @@ bool is_secure_vmid_valid(int vmid);
 int ion_hyp_assign_sg(struct sg_table *sgt, int *dest_vm_list,
 		      int dest_nelems, bool set_page_private);
 int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
-			int source_nelems, bool clear_page_private);
+			int source_nelems, bool clear_page_private,
+			bool try_lock);
 int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
 				   bool set_page_private);
 int ion_hyp_assign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
+2 −2
@@ -392,7 +392,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 	buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
 
 	if (vmid > 0)
-		ion_hyp_unassign_sg(table, &vmid, 1, true);
+		ion_hyp_unassign_sg(table, &vmid, 1, true, false);
 
 	for_each_sg(table->sgl, sg, table->nents, i)
 		free_buffer_page(sys_heap, buffer, sg_page(sg),
@@ -433,7 +433,7 @@ void ion_system_heap_free(struct ion_buffer *buffer)
 		if (vmid < 0)
 			ion_heap_buffer_zero(buffer);
 	} else if (vmid > 0) {
-		if (ion_hyp_unassign_sg(table, &vmid, 1, true))
+		if (ion_hyp_unassign_sg(table, &vmid, 1, true, false))
 			return;
 	}

+7 −2
@@ -463,7 +463,10 @@ int ion_secure_page_pool_shrink(
 		sg = sg_next(sg);
 	}
 
-	if (ion_hyp_unassign_sg(&sgt, &vmid, 1, true))
-		goto out2;
+	ret = ion_hyp_unassign_sg(&sgt, &vmid, 1, true, true);
+	if (ret == -EADDRNOTAVAIL)
+		goto out3;
+	else if (ret < 0)
+		goto out2;
 
 	list_for_each_entry_safe(page, tmp, &pages, lru) {
@@ -474,6 +477,8 @@ int ion_secure_page_pool_shrink(
 	sg_free_table(&sgt);
 	return freed;
 
+out2:
+	sg_free_table(&sgt);
 out1:
 	/* Restore pages to secure pool */
 	list_for_each_entry_safe(page, tmp, &pages, lru) {
@@ -481,7 +486,7 @@ int ion_secure_page_pool_shrink(
 		ion_page_pool_free(pool, page);
 	}
 	return 0;
-out2:
+out3:
 	/*
 	 * The security state of the pages is unknown after a failure;
 	 * They can neither be added back to the secure pool nor buddy system.