Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dd612c7e authored by Patrick Daly, committed by Liam Mark
Browse files

msm: secure_buffer: Limit continuous time spent in hypervisor



hyp_assign_table() is a costly operation during which nonsecure interrupts
are disabled. Split this operation into multiple parts for better
real-time behavior.

Splitting is done according to the following criteria:
- maximum number of physically contiguous memory regions, and
- maximum total memory size.

Here is a chart showing the average performance of hyp_assign_table() with
N physically contiguous chunks each with size X.

 #chunks    chunk_size(pages) total_memory(pages)    time(ms)
       2                   1                   2       3.354
       2                   4                   8      12.979
       2                 512                1024       4.349
       8                   1                   8       4.714
       8                   4                  32      26.781
       8                 512                4096       8.724
      32                   1                  32      17.093
      32                   4                 128      50.700
      32                 512               16384      26.717
     128                   1                 128      71.076
     128                   4                 512     126.305

Based on the above, select a maximum of 32 chunks or 512 total pages
as the limits.

Change-Id: I530cfdce76c8a2c38f60d6118647eaefd269e693
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
parent 0870b455
Loading
Loading
Loading
Loading
+40 −5
Original line number Diff line number Diff line
@@ -253,6 +253,9 @@ static struct mem_prot_info *get_info_list_from_table(struct sg_table *table,
	return info;
}

#define BATCH_MAX_SIZE SZ_2M
#define BATCH_MAX_SECTIONS 32

int hyp_assign_table(struct sg_table *table,
			u32 *source_vm_list, int source_nelems,
			int *dest_vmids, int *dest_perms,
@@ -267,6 +270,9 @@ int hyp_assign_table(struct sg_table *table,
	struct mem_prot_info *sg_table_copy;
	size_t sg_table_copy_size;

	int batch_start, batch_end;
	u64 batch_size;

	/*
	 * We can only pass cache-aligned sizes to hypervisor, so we need
	 * to kmalloc and memcpy the source_vm_list here.
@@ -312,11 +318,40 @@ int hyp_assign_table(struct sg_table *table,
	dmac_flush_range(dest_vm_copy,
			 (void *)dest_vm_copy + dest_vm_copy_size);

	batch_start = 0;
	while (batch_start < table->nents) {
		/* Ensure no size zero batches */
		batch_size = sg_table_copy[batch_start].size;
		batch_end = batch_start + 1;
		while (1) {
			u64 size;

			if (batch_end >= table->nents)
				break;
			if (batch_end - batch_start >= BATCH_MAX_SECTIONS)
				break;

			size = sg_table_copy[batch_end].size;
			if (size + batch_size >= BATCH_MAX_SIZE)
				break;

			batch_size += size;
			batch_end++;
		}

		desc.args[0] = virt_to_phys(&sg_table_copy[batch_start]);
		desc.args[1] = (batch_end - batch_start) *
				sizeof(sg_table_copy[0]);

		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				MEM_PROT_ASSIGN_ID), &desc);
	if (ret)
		if (ret) {
			pr_info("%s: Failed to assign memory protection, ret = %d\n",
				__func__, ret);
			break;
		}
		batch_start = batch_end;
	}

out_unlock:
	mutex_unlock(&secure_buffer_mutex);