Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fae9452a authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "ion: Ensure ION system secure heap shrinker doesn't deadlock"

parents 8d2bd428 7879836f
Loading
Loading
Loading
Loading
+44 −5
Original line number Diff line number Diff line
@@ -248,10 +248,17 @@ static struct mem_prot_info *get_info_list_from_table(struct sg_table *table,
#define BATCH_MAX_SIZE SZ_2M
#define BATCH_MAX_SECTIONS 32

int hyp_assign_table(struct sg_table *table,
/*
 *  When -EAGAIN is returned it is safe for the caller to try to call
 *  __hyp_assign_table again.
 *
 *  When -EADDRNOTAVAIL is returned the memory may no longer be in
 *  a usable state and should no longer be accessed by the HLOS.
 */
static int __hyp_assign_table(struct sg_table *table,
			u32 *source_vm_list, int source_nelems,
			int *dest_vmids, int *dest_perms,
			int dest_nelems)
			int dest_nelems, bool try_lock)
{
	int ret = 0;
	struct scm_desc desc = {0};
@@ -281,10 +288,17 @@ int hyp_assign_table(struct sg_table *table,
					  &dest_vm_copy_size);
	if (!dest_vm_copy) {
		ret = -ENOMEM;
		goto out_free;
		goto out_free_src;
	}

	if (try_lock) {
		if (!mutex_trylock(&secure_buffer_mutex)) {
			ret = -EAGAIN;
			goto out_free_dest;
		}
	} else {
		mutex_lock(&secure_buffer_mutex);
	}

	sg_table_copy = get_info_list_from_table(table, &sg_table_copy_size);
	if (!sg_table_copy) {
@@ -340,6 +354,12 @@ int hyp_assign_table(struct sg_table *table,
		if (ret) {
			pr_info("%s: Failed to assign memory protection, ret = %d\n",
				__func__, ret);

			/*
			 * Make it clear to clients that the memory may no
			 * longer be in a usable state.
			 */
			ret = -EADDRNOTAVAIL;
			break;
		}
		batch_start = batch_end;
@@ -347,12 +367,31 @@ int hyp_assign_table(struct sg_table *table,

out_unlock:
	mutex_unlock(&secure_buffer_mutex);
out_free_dest:
	kfree(dest_vm_copy);
out_free:
out_free_src:
	kfree(source_vm_copy);
	return ret;
}

/*
 * Blocking variant: sleeps on the internal secure-buffer mutex until
 * the hypervisor assignment can proceed.  Forwards directly to
 * __hyp_assign_table() with try_lock disabled, so it never returns
 * -EAGAIN (it may still return -EADDRNOTAVAIL on a failed assignment —
 * see the comment on __hyp_assign_table()).
 */
int hyp_assign_table(struct sg_table *table,
			u32 *source_vm_list, int source_nelems,
			int *dest_vmids, int *dest_perms,
			int dest_nelems)
{
	const bool nonblocking = false;

	return __hyp_assign_table(table, source_vm_list, source_nelems,
				  dest_vmids, dest_perms, dest_nelems,
				  nonblocking);
}

/*
 * Non-blocking variant: __hyp_assign_table() acquires the secure-buffer
 * mutex with mutex_trylock() and returns -EAGAIN when the lock is
 * contended; per the contract documented on __hyp_assign_table(), the
 * caller may safely retry in that case.  Intended for callers (such as
 * the ION secure heap shrinker) that must not sleep on the mutex.
 */
int try_hyp_assign_table(struct sg_table *table,
			u32 *source_vm_list, int source_nelems,
			int *dest_vmids, int *dest_perms,
			int dest_nelems)
{
	const bool nonblocking = true;

	return __hyp_assign_table(table, source_vm_list, source_nelems,
				  dest_vmids, dest_perms, dest_nelems,
				  nonblocking);
}

int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vm_list,
			int source_nelems, int *dest_vmids,
			int *dest_perms, int dest_nelems)
+12 −6
Original line number Diff line number Diff line
@@ -99,7 +99,8 @@ static int populate_vm_list(unsigned long flags, unsigned int *vm_list,
}

int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
			int source_nelems, bool clear_page_private)
			int source_nelems, bool clear_page_private,
			bool try_lock)
{
	u32 dest_vmid = VMID_HLOS;
	u32 dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
@@ -113,9 +114,14 @@ int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
		goto out;
	}

	if (try_lock)
		ret = try_hyp_assign_table(sgt, source_vm_list, source_nelems,
					   &dest_vmid, &dest_perms, 1);
	else
		ret = hyp_assign_table(sgt, source_vm_list, source_nelems,
				       &dest_vmid, &dest_perms, 1);
	if (ret) {
		if (!try_lock)
			pr_err("%s: Unassign call failed.\n",
			       __func__);
		goto out;
@@ -193,7 +199,7 @@ int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
	}

	ret = ion_hyp_unassign_sg(sgt, source_vm_list, source_nelems,
				  set_page_private);
				  set_page_private, false);

out_free_source:
	kfree(source_vm_list);
+2 −1
Original line number Diff line number Diff line
@@ -20,7 +20,8 @@ bool is_secure_vmid_valid(int vmid);
int ion_hyp_assign_sg(struct sg_table *sgt, int *dest_vm_list,
		      int dest_nelems, bool set_page_private);
int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
			int source_nelems, bool clear_page_private);
			int source_nelems, bool clear_page_private,
			bool try_lock);
int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
				   bool set_page_private);
int ion_hyp_assign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
+2 −2
Original line number Diff line number Diff line
@@ -394,7 +394,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
	buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;

	if (vmid > 0)
		ion_hyp_unassign_sg(table, &vmid, 1, true);
		ion_hyp_unassign_sg(table, &vmid, 1, true, false);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
@@ -435,7 +435,7 @@ void ion_system_heap_free(struct ion_buffer *buffer)
		if (vmid < 0)
			ion_heap_buffer_zero(buffer);
	} else if (vmid > 0) {
		if (ion_hyp_unassign_sg(table, &vmid, 1, true))
		if (ion_hyp_unassign_sg(table, &vmid, 1, true, false))
			return;
	}

+7 −2
Original line number Diff line number Diff line
@@ -463,7 +463,10 @@ int ion_secure_page_pool_shrink(
		sg = sg_next(sg);
	}

	if (ion_hyp_unassign_sg(&sgt, &vmid, 1, true))
	ret = ion_hyp_unassign_sg(&sgt, &vmid, 1, true, true);
	if (ret == -EADDRNOTAVAIL)
		goto out3;
	else if (ret < 0)
		goto out2;

	list_for_each_entry_safe(page, tmp, &pages, lru) {
@@ -474,6 +477,8 @@ int ion_secure_page_pool_shrink(
	sg_free_table(&sgt);
	return freed;

out2:
	sg_free_table(&sgt);
out1:
	/* Restore pages to secure pool */
	list_for_each_entry_safe(page, tmp, &pages, lru) {
@@ -481,7 +486,7 @@ int ion_secure_page_pool_shrink(
		ion_page_pool_free(pool, page);
	}
	return 0;
out2:
out3:
	/*
	 * The security state of the pages is unknown after a failure;
	 * They can neither be added back to the secure pool nor buddy system.
Loading