Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9021b6b5 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: Consolidate and fix redundant secure buffer code"

parents 318ead95 c3aef20e
Loading
Loading
Loading
Loading
+5 −8
Original line number | Diff line number | Diff line
@@ -2997,7 +2997,7 @@ static int kgsl_setup_dma_buf(struct kgsl_mem_entry *entry,

	attach = dma_buf_attach(dmabuf, device->dev);
	if (IS_ERR_OR_NULL(attach)) {
		ret = PTR_ERR(attach);
		ret = attach ? PTR_ERR(attach) : -EINVAL;
		goto out;
	}

@@ -3054,14 +3054,11 @@ static int kgsl_setup_ion(struct kgsl_mem_entry *entry,
	int fd = param->fd;
	struct dma_buf *dmabuf;

	if (!param->len || param->offset)
		return -EINVAL;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		return ret ? ret : -EINVAL;
	}

	if (IS_ERR_OR_NULL(dmabuf))
		return (dmabuf == NULL) ? -EINVAL : PTR_ERR(dmabuf);

	ret = kgsl_setup_dma_buf(entry, pagetable, device, dmabuf);
	if (ret)
		dma_buf_put(dmabuf);
+58 −118
Original line number | Diff line number | Diff line
@@ -83,8 +83,7 @@ struct mem_entry_stats {
		mem_entry_max_show), \
}

static int kgsl_cma_unlock_secure(struct kgsl_device *device,
			struct kgsl_memdesc *memdesc);
static void kgsl_cma_unlock_secure(struct kgsl_memdesc *memdesc);

/**
 * Given a kobj, find the process structure attached to it
@@ -490,9 +489,7 @@ static void kgsl_cma_coherent_free(struct kgsl_memdesc *memdesc)
	if (memdesc->hostptr) {
		if (memdesc->priv & KGSL_MEMDESC_SECURE) {
			kgsl_driver.stats.secure -= memdesc->size;
			if (memdesc->priv & KGSL_MEMDESC_TZ_LOCKED)
				kgsl_cma_unlock_secure(
				memdesc->pagetable->mmu->device, memdesc);
			kgsl_cma_unlock_secure(memdesc);
			attrs = &memdesc->attrs;
		} else
			kgsl_driver.stats.coherent -= memdesc->size;
@@ -944,44 +941,13 @@ err:
}
EXPORT_SYMBOL(kgsl_cma_alloc_coherent);

int kgsl_cma_alloc_secure(struct kgsl_device *device,
			struct kgsl_memdesc *memdesc, size_t size)
static int scm_lock_chunk(struct kgsl_memdesc *memdesc, int lock)
{
	struct kgsl_iommu *iommu = device->mmu.priv;
	struct kgsl_iommu_unit *iommu_unit =
			&iommu->iommu_units[KGSL_IOMMU_UNIT_0];
	int result = 0;
	struct cp2_lock_req request;
	unsigned int resp;
	unsigned int *chunk_list = NULL;
	struct kgsl_pagetable *pagetable = device->mmu.securepagetable;
	unsigned int *chunk_list;
	struct scm_desc desc = {0};

	if (size == 0)
		return -EINVAL;

	/* Align size to 1M boundaries */
	size = ALIGN(size, SZ_1M);

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->ops = &kgsl_cma_ops;
	memdesc->dev = iommu_unit->dev[KGSL_IOMMU_CONTEXT_SECURE].dev;

	init_dma_attrs(&memdesc->attrs);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &memdesc->attrs);

	memdesc->hostptr = dma_alloc_attrs(memdesc->dev, size,
		&memdesc->physaddr, GFP_KERNEL, &memdesc->attrs);

	if (memdesc->hostptr == NULL) {
		result = -ENOMEM;
		goto err;
	}

	result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
	if (result)
		goto err;
	int result;

	/*
	 * Flush the virt addr range before sending the memory to the
@@ -994,10 +960,9 @@ int kgsl_cma_alloc_secure(struct kgsl_device *device,
	 * contiguous.
	 */
	chunk_list = kzalloc(sizeof(unsigned int), GFP_KERNEL);
	if (!chunk_list) {
		result = -ENOMEM;
		goto err;
	}
	if (!chunk_list)
		return -ENOMEM;

	chunk_list[0] = memdesc->physaddr;
	dmac_flush_range((void *)chunk_list, (void *)chunk_list + 1);

@@ -1005,21 +970,12 @@ int kgsl_cma_alloc_secure(struct kgsl_device *device,
	desc.args[1] = request.chunks.chunk_list_size = 1;
	desc.args[2] = request.chunks.chunk_size = memdesc->size;
	desc.args[3] = request.mem_usage = 0;
	desc.args[4] = request.lock = 1;
	desc.args[4] = request.lock = lock;
	desc.args[5] = 0;
	desc.arginfo = SCM_ARGS(6, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
				SCM_VAL);
	kmap_flush_unused();
	kmap_atomic_flush_unused();

	if (result == 0)
		memdesc->priv |= KGSL_MEMDESC_TZ_LOCKED;
	else {
		KGSL_DRV_ERR(device, "Secure buffer size %zx failed pt %d\n",
					 memdesc->size, pagetable->name);
		goto err;
	}

	if (!is_scm_armv8()) {
		result = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
				&request, sizeof(request), &resp, sizeof(resp));
@@ -1028,18 +984,57 @@ int kgsl_cma_alloc_secure(struct kgsl_device *device,
				   MEM_PROTECT_LOCK_ID2_FLAT), &desc);
		resp = desc.ret[0];
	}
	if (result) {
		KGSL_DRV_ERR(device, "Secure buffer allocation failed\n");

	kfree(chunk_list);
	return result;
}

int kgsl_cma_alloc_secure(struct kgsl_device *device,
			struct kgsl_memdesc *memdesc, size_t size)
{
	struct kgsl_iommu *iommu = device->mmu.priv;
	struct kgsl_iommu_unit *iommu_unit =
			&iommu->iommu_units[KGSL_IOMMU_UNIT_0];
	int result = 0;
	struct kgsl_pagetable *pagetable = device->mmu.securepagetable;

	if (size == 0)
		return -EINVAL;

	/* Align size to 1M boundaries */
	size = ALIGN(size, SZ_1M);

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->ops = &kgsl_cma_ops;
	memdesc->dev = iommu_unit->dev[KGSL_IOMMU_CONTEXT_SECURE].dev;

	init_dma_attrs(&memdesc->attrs);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &memdesc->attrs);

	memdesc->hostptr = dma_alloc_attrs(memdesc->dev, size,
		&memdesc->physaddr, GFP_KERNEL, &memdesc->attrs);

	if (memdesc->hostptr == NULL) {
		result = -ENOMEM;
		goto err;
	}

	result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
	if (result)
		goto err;

	result = scm_lock_chunk(memdesc, 1);

	if (result != 0)
		goto err;

	memdesc->priv |= KGSL_MEMDESC_TZ_LOCKED;

	/* Record statistics */
	KGSL_STATS_ADD(size, kgsl_driver.stats.secure,
	       kgsl_driver.stats.secure_max);

err:
	kfree(chunk_list);

	if (result)
		kgsl_sharedmem_free(memdesc);

@@ -1049,67 +1044,12 @@ EXPORT_SYMBOL(kgsl_cma_alloc_secure);

/**
 * kgsl_cma_unlock_secure() - Unlock secure memory by calling TZ
 * @device: kgsl device pointer
 * @memdesc: memory descriptor
 */
static int kgsl_cma_unlock_secure(struct kgsl_device *device,
			struct kgsl_memdesc *memdesc)
static void kgsl_cma_unlock_secure(struct kgsl_memdesc *memdesc)
{
	int result = 0;
	struct cp2_lock_req request;
	unsigned int resp;
	unsigned int *chunk_list;
	struct kgsl_pagetable *pagetable = device->mmu.securepagetable;
	struct scm_desc desc;

	if (!memdesc->size) {
		KGSL_DRV_ERR(device, "Secure buffer invalid size 0\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(memdesc->size, SZ_1M)) {
		KGSL_DRV_ERR(device,
			 "Secure buffer size %zx must be %x aligned",
			 memdesc->size, SZ_1M);
			return -EINVAL;
	}

	/*
	 * Flush the phys addr range before sending the memory to the
	 * secure environment to ensure the data is actually present
	 * in RAM
	 */
	chunk_list = kzalloc(sizeof(unsigned int), GFP_KERNEL);
	if (!chunk_list)
		return -ENOMEM;
	chunk_list[0] = memdesc->physaddr;
	dmac_flush_range((void *)chunk_list, (void *)chunk_list + 1);

	desc.args[0] = request.chunks.chunk_list = virt_to_phys(chunk_list);
	desc.args[1] = request.chunks.chunk_list_size = 1;
	desc.args[2] = request.chunks.chunk_size = memdesc->size;
	desc.args[3] = request.mem_usage = 0;
	desc.args[4] = request.lock = 0;
	desc.args[5] = 0;
	desc.arginfo = SCM_ARGS(6, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
				SCM_VAL);
	kmap_flush_unused();
	kmap_atomic_flush_unused();

	if (!is_scm_armv8()) {
		result = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
				&request, sizeof(request), &resp, sizeof(resp));
	} else {
		result = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				   MEM_PROTECT_LOCK_ID2_FLAT), &desc);
		resp = desc.ret[0];
	}
	kfree(chunk_list);

	if (result)
		KGSL_DRV_ERR(device,
		"Secure buffer unlock size %zx failed pt %d\n",
		memdesc->size, pagetable->name);
	if (memdesc->size == 0 || !(memdesc->priv & KGSL_MEMDESC_TZ_LOCKED))
		return;

	return result;
	scm_lock_chunk(memdesc, 0);
}