
Commit bca12d31 authored by qctecmdr, committed by Gerrit (the friendly Code Review server)

Merge "soc: qcom: secure_buffer: Process large SG tables in batches"

parents e4e592b9 37be1adf
+65 −106
@@ -46,8 +46,8 @@ struct dest_vm_and_perm_info {
 	u32 ctx_size;
 };
 
-static void *qcom_secure_mem;
-#define QCOM_SECURE_MEM_SIZE (2048*1024)
+#define BATCH_MAX_SIZE SZ_2M
+#define BATCH_MAX_SECTIONS 32
 
 static int secure_buffer_change_chunk(u32 chunks,
 				u32 nchunks,
@@ -201,42 +201,72 @@ populate_dest_info(int *dest_vmids, int nelements, int *dest_perms,
 }
 
-/* Must hold secure_buffer_mutex while allocated buffer is in use */
-static struct mem_prot_info *get_info_list_from_table(struct sg_table *table,
-						      size_t *size_in_bytes)
-{
-	int i;
-	struct scatterlist *sg;
-	struct mem_prot_info *info;
-	size_t size;
-
-	size = table->nents * sizeof(*info);
-
-	if (size >= QCOM_SECURE_MEM_SIZE) {
-		pr_err("%s: Not enough memory allocated. Required size %zd\n",
-				__func__, size);
-		return NULL;
-	}
-
-	if (!qcom_secure_mem) {
-		pr_err("%s is not functional as qcom_secure_mem is not allocated.\n",
-				__func__);
-		return NULL;
-	}
-
-	/* "Allocate" it */
-	info = qcom_secure_mem;
-
-	for_each_sg(table->sgl, sg, table->nents, i) {
-		info[i].addr = page_to_phys(sg_page(sg));
-		info[i].size = sg->length;
-	}
-
-	*size_in_bytes = size;
-	return info;
-}
-
-#define BATCH_MAX_SIZE SZ_2M
-#define BATCH_MAX_SECTIONS 32
+static unsigned int get_batches_from_sgl(struct mem_prot_info *sg_table_copy,
+					 struct scatterlist *sgl,
+					 struct scatterlist **next_sgl)
+{
+	u64 batch_size = 0;
+	unsigned int i = 0;
+	struct scatterlist *curr_sgl = sgl;
+
+	/* Ensure no zero size batches */
+	do {
+		sg_table_copy[i].addr = page_to_phys(sg_page(curr_sgl));
+		sg_table_copy[i].size = curr_sgl->length;
+		batch_size += sg_table_copy[i].size;
+		curr_sgl = sg_next(curr_sgl);
+		i++;
+	} while (curr_sgl && i < BATCH_MAX_SECTIONS &&
+		 curr_sgl->length + batch_size < BATCH_MAX_SIZE);
+
+	*next_sgl = curr_sgl;
+	return i;
+}
+
+static int batched_hyp_assign(struct sg_table *table, struct scm_desc *desc)
+{
+	unsigned int entries_size;
+	unsigned int batch_start = 0;
+	unsigned int batches_processed;
+	struct scatterlist *curr_sgl = table->sgl;
+	struct scatterlist *next_sgl;
+	int ret = 0;
+	struct mem_prot_info *sg_table_copy = kcalloc(BATCH_MAX_SECTIONS,
+						      sizeof(*sg_table_copy),
+						      GFP_KERNEL);
+
+	if (!sg_table_copy)
+		return -ENOMEM;
+
+	while (batch_start < table->nents) {
+		batches_processed = get_batches_from_sgl(sg_table_copy,
+							 curr_sgl, &next_sgl);
+		curr_sgl = next_sgl;
+		entries_size = batches_processed * sizeof(*sg_table_copy);
+		dmac_flush_range(sg_table_copy,
+				 (void *)sg_table_copy + entries_size);
+		desc->args[0] = virt_to_phys(sg_table_copy);
+		desc->args[1] = entries_size;
+
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+				MEM_PROT_ASSIGN_ID), desc);
+		if (ret) {
+			pr_info("%s: Failed to assign memory protection, ret = %d\n",
+				__func__, ret);
+			/*
+			 * Make it clear to clients that the memory may no
+			 * longer be in a usable state.
+			 */
+			ret = -EADDRNOTAVAIL;
+			break;
+		}
+
+		batch_start += batches_processed;
+	}
+
+	kfree(sg_table_copy);
+	return ret;
+}
 
 /*
  *  When -EAGAIN is returned it is safe for the caller to try to call
@@ -256,11 +286,10 @@ static int __hyp_assign_table(struct sg_table *table,
 	size_t source_vm_copy_size;
 	struct dest_vm_and_perm_info *dest_vm_copy;
 	size_t dest_vm_copy_size;
-	struct mem_prot_info *sg_table_copy;
-	size_t sg_table_copy_size;
-	int batch_start, batch_end;
-	u64 batch_size;
 
 	if (!table || !table->sgl || !source_vm_list || !source_nelems ||
 	    !dest_vmids || !dest_perms || !dest_nelems)
 		return -EINVAL;
 
 	/*
 	 * We can only pass cache-aligned sizes to hypervisor, so we need
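Aside on the truncated comment above: buffer sizes handed to the hypervisor must be cache-line aligned, which is why the copy helpers in this file round their allocation sizes up. A minimal sketch of that rounding, assuming a 64-byte line size for illustration (the kernel queries the real line size at runtime and uses its own ALIGN(); ALIGN_UP here is the sketch's macro, not a kernel one):

#include <stdio.h>
#include <stddef.h>

#define CACHE_LINE 64	/* assumed line size, illustration only */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	/* A few raw sizes and their cache-line-aligned equivalents. */
	size_t sizes[] = { 1, 64, 65, 100, 4096 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%4zu -> %4zu\n", sizes[i],
		       ALIGN_UP(sizes[i], CACHE_LINE));
	return 0;
}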
@@ -278,7 +307,7 @@ static int __hyp_assign_table(struct sg_table *table,
 					  &dest_vm_copy_size);
 	if (!dest_vm_copy) {
 		ret = -ENOMEM;
-		goto out_free_src;
+		goto out_free_source;
 	}
 
 	if (try_lock) {
@@ -290,14 +319,6 @@ static int __hyp_assign_table(struct sg_table *table,
 		mutex_lock(&secure_buffer_mutex);
 	}
 
-	sg_table_copy = get_info_list_from_table(table, &sg_table_copy_size);
-	if (!sg_table_copy) {
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
-
-	desc.args[0] = virt_to_phys(sg_table_copy);
-	desc.args[1] = sg_table_copy_size;
 	desc.args[2] = virt_to_phys(source_vm_copy);
 	desc.args[3] = source_vm_copy_size;
 	desc.args[4] = virt_to_phys(dest_vm_copy);
@@ -309,57 +330,15 @@ static int __hyp_assign_table(struct sg_table *table,

 	dmac_flush_range(source_vm_copy,
 			 (void *)source_vm_copy + source_vm_copy_size);
-	dmac_flush_range(sg_table_copy,
-			 (void *)sg_table_copy + sg_table_copy_size);
 	dmac_flush_range(dest_vm_copy,
 			 (void *)dest_vm_copy + dest_vm_copy_size);
 
-	batch_start = 0;
-	while (batch_start < table->nents) {
-		/* Ensure no size zero batches */
-		batch_size = sg_table_copy[batch_start].size;
-		batch_end = batch_start + 1;
-		while (1) {
-			u64 size;
-
-			if (batch_end >= table->nents)
-				break;
-			if (batch_end - batch_start >= BATCH_MAX_SECTIONS)
-				break;
-
-			size = sg_table_copy[batch_end].size;
-			if (size + batch_size >= BATCH_MAX_SIZE)
-				break;
-
-			batch_size += size;
-			batch_end++;
-		}
-
-		desc.args[0] = virt_to_phys(&sg_table_copy[batch_start]);
-		desc.args[1] = (batch_end - batch_start) *
-				sizeof(sg_table_copy[0]);
-
-		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
-				MEM_PROT_ASSIGN_ID), &desc);
-		if (ret) {
-			pr_info("%s: Failed to assign memory protection, ret = %d\n",
-				__func__, ret);
-
-			/*
-			 * Make it clear to clients that the memory may no
-			 * longer be in a usable state.
-			 */
-			ret = -EADDRNOTAVAIL;
-			break;
-		}
-		batch_start = batch_end;
-	}
+	ret = batched_hyp_assign(table, &desc);
 
 out_unlock:
 	mutex_unlock(&secure_buffer_mutex);
 out_free_dest:
 	kfree(dest_vm_copy);
-out_free_src:
+out_free_source:
 	kfree(source_vm_copy);
 	return ret;
 }
@@ -459,23 +438,3 @@ bool msm_secure_v2_is_supported(void)
 	return (scm_get_feat_version(FEATURE_ID_CP) >=
 			MAKE_CP_VERSION(1, 1, 0));
 }
-
-static int __init alloc_secure_shared_memory(void)
-{
-	int ret = 0;
-	dma_addr_t dma_handle;
-
-	qcom_secure_mem = kzalloc(QCOM_SECURE_MEM_SIZE, GFP_KERNEL);
-	if (!qcom_secure_mem) {
-		/* Fallback to CMA-DMA memory */
-		qcom_secure_mem = dma_alloc_coherent(NULL, QCOM_SECURE_MEM_SIZE,
-						&dma_handle, GFP_KERNEL);
-		if (!qcom_secure_mem) {
-			pr_err("Couldn't allocate memory for secure use-cases. hyp_assign_table will not work\n");
-			return -ENOMEM;
-		}
-	}
-
-	return ret;
-}
-pure_initcall(alloc_secure_shared_memory);
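Net effect: with batching in place, the permanently resident qcom_secure_mem buffer and its pure_initcall allocation (including the CMA-DMA fallback) are no longer needed. Each hyp_assign_table call instead works from a temporary kcalloc'd array of BATCH_MAX_SECTIONS entries; assuming struct mem_prot_info is a 16-byte address/size pair on 64-bit builds (its definition is not shown in this diff), that is 32 × 16 = 512 bytes of scratch per call in place of 2048 × 1024 bytes = 2 MiB held for the system's lifetime. A failed scm_call2 still maps to -EADDRNOTAVAIL, so callers know the memory may have been left in an unusable state.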