Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 032b0de1 authored by Jordan Crouse's avatar Jordan Crouse
Browse files

msm: kgsl: Make secure memory depend on QCOM_SECURE_BUFFER



Secure memory uses a helper driver to lock memory in the hypervisor. The
helper driver is selectable at compile time (CONFIG_QCOM_SECURE_BUFFER).
Any attempt to allocate secure memory without the dependent support
will return -ENODEV which the kernel and user drivers should know how to
handle safely.

Change-Id: Ic0dedbadbde75ce8856a677d7c7793cda0c2c727
Signed-off-by: default avatarJordan Crouse <jcrouse@codeaurora.org>
parent fef1dd5a
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -1427,8 +1427,10 @@ static int adreno_probe(struct platform_device *pdev)
	 * check the GPU capabilities here and modify mmu->secured accordingly
	 */

#if IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER)
	if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
		device->mmu.secured = false;
#endif

	if (ADRENO_FEATURE(adreno_dev, ADRENO_IOCOHERENT))
		device->mmu.features |= KGSL_MMU_IO_COHERENT;
+12 −29
Original line number Diff line number Diff line
@@ -1003,6 +1003,7 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
	return ret;
}

#if IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER)
static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
	struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
@@ -1011,7 +1012,6 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
	int secure_vmid = VMID_CP_PIXEL;
	unsigned int cb_num;

	if (!mmu->secured)
		return -EPERM;
@@ -1036,23 +1036,17 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
	iommu_set_fault_handler(iommu_pt->domain,
				kgsl_iommu_fault_handler, pt);

	ret = iommu_domain_get_attr(iommu_pt->domain,
				DOMAIN_ATTR_CONTEXT_BANK, &cb_num);
	if (ret) {
		dev_err(device->dev, "get DOMAIN_ATTR_PROCID failed: %d\n",
				ret);
		goto done;
	}

	ctx->cb_num = cb_num;
	ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET
			+ (cb_num << KGSL_IOMMU_CB_SHIFT);

done:
	if (ret)
		_free_pt(ctx, pt);
	return ret;
}
#else
/*
 * Stub used when CONFIG_QCOM_SECURE_BUFFER is disabled: a secure
 * pagetable cannot be set up without the hypervisor helper, so
 * refuse the operation outright.
 */
static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
	return -EPERM;
}
#endif

static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
@@ -1335,7 +1329,6 @@ static int _setup_secure_context(struct kgsl_mmu *mmu)
	int ret;
	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
	unsigned int cb_num;

	struct kgsl_iommu_pt *iommu_pt;

@@ -1348,21 +1341,11 @@ static int _setup_secure_context(struct kgsl_mmu *mmu)
	iommu_pt = mmu->securepagetable->priv;

	ret = _attach_pt(iommu_pt, ctx);
	if (ret)
		goto done;

	if (!ret) {
		ctx->default_pt = mmu->securepagetable;

	ret = iommu_domain_get_attr(iommu_pt->domain, DOMAIN_ATTR_CONTEXT_BANK,
					&cb_num);
	if (ret) {
		dev_err(KGSL_MMU_DEVICE(mmu)->dev,
			"get CONTEXT_BANK attr, err %d\n", ret);
		goto done;
		return 0;
	}
	ctx->cb_num = cb_num;
done:
	if (ret)

	_detach_context(ctx);
	return ret;
}
+30 −2
Original line number Diff line number Diff line
@@ -8,7 +8,6 @@
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <soc/qcom/secure_buffer.h>

#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
@@ -404,6 +403,10 @@ static void kgsl_paged_unmap_kernel(struct kgsl_memdesc *memdesc)
	mutex_unlock(&kernel_map_global_lock);
}

#if IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER)

#include <soc/qcom/secure_buffer.h>

static int lock_sgt(struct sg_table *sgt, u64 size)
{
	struct scatterlist *sg;
@@ -460,6 +463,7 @@ static int unlock_sgt(struct sg_table *sgt)
		ClearPagePrivate(sg_page_iter_page(&sg_iter));
	return 0;
}
#endif

static int kgsl_paged_map_kernel(struct kgsl_memdesc *memdesc)
{
@@ -712,6 +716,7 @@ void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
	memdesc->ops->free(memdesc);
}

#if IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER)
void kgsl_free_secure_page(struct page *page)
{
	struct sg_table sgt;
@@ -758,6 +763,16 @@ struct page *kgsl_alloc_secure_page(void)
	}
	return page;
}
#else
/*
 * No-op stub for builds without CONFIG_QCOM_SECURE_BUFFER: no secure
 * page can ever have been allocated (kgsl_alloc_secure_page() returns
 * NULL in this configuration), so there is nothing to free.
 */
void kgsl_free_secure_page(struct page *page)
{
}

/*
 * Stub for builds without CONFIG_QCOM_SECURE_BUFFER: secure pages
 * cannot be locked in the hypervisor, so always fail the allocation
 * by returning NULL.
 */
struct page *kgsl_alloc_secure_page(void)
{
	return NULL;
}
#endif

int
kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
@@ -1029,6 +1044,7 @@ static void kgsl_contiguous_free(struct kgsl_memdesc *memdesc)
	_kgsl_contiguous_free(memdesc);
}

#if IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER)
static void kgsl_free_secure_pool_pages(struct kgsl_memdesc *memdesc)
{
	int ret = unlock_sgt(memdesc->sgt);
@@ -1054,6 +1070,7 @@ static void kgsl_free_secure_pool_pages(struct kgsl_memdesc *memdesc)

	memdesc->sgt = NULL;
}
#endif

static void kgsl_free_pool_pages(struct kgsl_memdesc *memdesc)
{
@@ -1076,11 +1093,12 @@ static struct kgsl_memdesc_ops kgsl_contiguous_ops = {
	.vmfault = kgsl_contiguous_vmfault,
};


#if IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER)
/*
 * memdesc ops for secure pool allocations; only .free is provided
 * here. NOTE(review): .vmflags/.vmfault are intentionally unset —
 * see the FIXME below; confirm secure buffers are never mmap'd
 * through the paths that need them.
 */
static struct kgsl_memdesc_ops kgsl_secure_pool_ops = {
	.free = kgsl_free_secure_pool_pages,
	/* FIXME: Make sure vmflags / vmfault does the right thing here */
};
#endif

static struct kgsl_memdesc_ops kgsl_pool_ops = {
	.free = kgsl_free_pool_pages,
@@ -1090,6 +1108,7 @@ static struct kgsl_memdesc_ops kgsl_pool_ops = {
	.unmap_kernel = kgsl_paged_unmap_kernel,
};

#if IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER)
static int kgsl_alloc_secure_pages(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u64 size, u64 flags, u32 priv)
{
@@ -1145,6 +1164,7 @@ static int kgsl_alloc_secure_pages(struct kgsl_device *device,

	return 0;
}
#endif

static int kgsl_alloc_pages(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u64 size, u64 flags, u32 priv)
@@ -1229,11 +1249,19 @@ static int kgsl_alloc_contiguous(struct kgsl_device *device,
	return ret;
}

#if IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER)
/*
 * Allocate secure (content-protected) GPU memory. Thin wrapper that
 * forwards to the page-based secure allocator; kept as a separate
 * function so the !CONFIG_QCOM_SECURE_BUFFER build can swap in a
 * stub with the same signature.
 */
static int kgsl_allocate_secure(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u64 size, u64 flags, u32 priv)
{
	return kgsl_alloc_secure_pages(device, memdesc, size, flags, priv);
}
#else
/*
 * Stub for builds without CONFIG_QCOM_SECURE_BUFFER: secure
 * allocations depend on the hypervisor helper driver, so report the
 * facility as unavailable with -ENODEV (a value callers are expected
 * to handle safely).
 */
static int kgsl_allocate_secure(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u64 size, u64 flags, u32 priv)
{
	return -ENODEV;
}
#endif

int kgsl_allocate_user(struct kgsl_device *device, struct kgsl_memdesc *memdesc,
		u64 size, u64 flags, u32 priv)