Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1f40833f authored by Harshdeep Dhatt, committed by Tarun Karra
Browse files

msm: kgsl: Change the secure pagetable initialization sequence



We want to allocate multiple secure global buffers, but we don't
want to store them in an array like we store normal global buffers.
This is because we only ever map the secure global buffers once,
to the secure pagetable at the time of device initialization.
Hence, the secure pagetable needs to be initialized beforehand.
The device specific code must explicitly map/unmap the secure global
buffers during device init/close.

Change-Id: Ic2aa37ca93ab5fc966174493b06f42a873b7d4a0
Signed-off-by: Harshdeep Dhatt <hdhatt@codeaurora.org>
parent 517a067e
Loading
Loading
Loading
Loading
+12 −3
Original line number Diff line number Diff line
@@ -193,6 +193,8 @@ static void a5xx_critical_packet_destroy(struct adreno_device *adreno_dev)
	kgsl_free_global(&adreno_dev->dev, &crit_pkts_refbuf2);
	kgsl_free_global(&adreno_dev->dev, &crit_pkts_refbuf3);

	kgsl_iommu_unmap_global_secure_pt_entry(KGSL_DEVICE(adreno_dev),
			&crit_pkts_refbuf0);
	kgsl_sharedmem_free(&crit_pkts_refbuf0);

}
@@ -231,8 +233,10 @@ static int a5xx_critical_packet_construct(struct adreno_device *adreno_dev)
	if (ret)
		return ret;

	kgsl_add_global_secure_entry(&adreno_dev->dev,
	ret = kgsl_iommu_map_global_secure_pt_entry(&adreno_dev->dev,
					&crit_pkts_refbuf0);
	if (ret)
		return ret;

	ret = kgsl_allocate_global(&adreno_dev->dev,
					&crit_pkts_refbuf1,
@@ -293,8 +297,13 @@ static void a5xx_init(struct adreno_device *adreno_dev)

	INIT_WORK(&adreno_dev->irq_storm_work, a5xx_irq_storm_worker);

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS))
		a5xx_critical_packet_construct(adreno_dev);
	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS)) {
		int ret;

		ret = a5xx_critical_packet_construct(adreno_dev);
		if (ret)
			a5xx_critical_packet_destroy(adreno_dev);
	}

	a5xx_crashdump_init(adreno_dev);
}
+43 −40
Original line number Diff line number Diff line
@@ -110,7 +110,7 @@ struct global_pt_entry {
};

static struct global_pt_entry global_pt_entries[GLOBAL_PT_ENTRIES];
static struct kgsl_memdesc *kgsl_global_secure_pt_entry;
static int secure_global_size;
static int global_pt_count;
uint64_t global_pt_alloc;
static struct kgsl_memdesc gpu_qdss_desc;
@@ -162,24 +162,33 @@ static int kgsl_iommu_map_globals(struct kgsl_pagetable *pagetable)
	return 0;
}

static void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_pagetable
								*pagetable)
void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_device *device,
				struct kgsl_memdesc *entry)
{
	struct kgsl_memdesc *entry = kgsl_global_secure_pt_entry;
	if (!kgsl_mmu_is_secured(&device->mmu))
		return;

	if (entry != NULL)
		kgsl_mmu_unmap(pagetable, entry);
	if (entry != NULL && entry->pagetable->name == KGSL_MMU_SECURE_PT)
		kgsl_mmu_unmap(entry->pagetable, entry);

}

static int kgsl_map_global_secure_pt_entry(struct kgsl_pagetable *pagetable)
int kgsl_iommu_map_global_secure_pt_entry(struct kgsl_device *device,
				struct kgsl_memdesc *entry)
{
	int ret = 0;
	struct kgsl_memdesc *entry = kgsl_global_secure_pt_entry;

	if (!kgsl_mmu_is_secured(&device->mmu))
		return -ENOTSUPP;

	if (entry != NULL) {
		struct kgsl_pagetable *pagetable = device->mmu.securepagetable;
		entry->pagetable = pagetable;
		entry->gpuaddr = KGSL_IOMMU_SECURE_BASE + secure_global_size;

		ret = kgsl_mmu_map(pagetable, entry);
		if (ret == 0)
			secure_global_size += entry->size;
	}
	return ret;
}
@@ -224,13 +233,6 @@ static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
	global_pt_count++;
}

void kgsl_add_global_secure_entry(struct kgsl_device *device,
					struct kgsl_memdesc *memdesc)
{
	memdesc->gpuaddr = KGSL_IOMMU_SECURE_BASE;
	kgsl_global_secure_pt_entry = memdesc;
}

struct kgsl_memdesc *kgsl_iommu_get_qdss_global_entry(void)
{
	return &gpu_qdss_desc;
@@ -1068,7 +1070,6 @@ static void kgsl_iommu_destroy_pagetable(struct kgsl_pagetable *pt)

	if (pt->name == KGSL_MMU_SECURE_PT) {
		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
		kgsl_iommu_unmap_global_secure_pt_entry(pt);
	} else {
		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
		kgsl_iommu_unmap_globals(pt);
@@ -1089,13 +1090,10 @@ static void setup_64bit_pagetable(struct kgsl_mmu *mmu,
		struct kgsl_pagetable *pagetable,
		struct kgsl_iommu_pt *pt)
{
	unsigned int secure_global_size = kgsl_global_secure_pt_entry != NULL ?
					kgsl_global_secure_pt_entry->size : 0;
	if (mmu->secured && pagetable->name == KGSL_MMU_SECURE_PT) {
		pt->compat_va_start = KGSL_IOMMU_SECURE_BASE +
						secure_global_size;
		pt->compat_va_start = KGSL_IOMMU_SECURE_BASE;
		pt->compat_va_end = KGSL_IOMMU_SECURE_END;
		pt->va_start = KGSL_IOMMU_SECURE_BASE + secure_global_size;
		pt->va_start = KGSL_IOMMU_SECURE_BASE;
		pt->va_end = KGSL_IOMMU_SECURE_END;
	} else {
		pt->compat_va_start = KGSL_IOMMU_SVM_BASE32;
@@ -1120,15 +1118,11 @@ static void setup_32bit_pagetable(struct kgsl_mmu *mmu,
		struct kgsl_pagetable *pagetable,
		struct kgsl_iommu_pt *pt)
{
	unsigned int secure_global_size = kgsl_global_secure_pt_entry != NULL ?
					kgsl_global_secure_pt_entry->size : 0;
	if (mmu->secured) {
		if (pagetable->name == KGSL_MMU_SECURE_PT) {
			pt->compat_va_start = KGSL_IOMMU_SECURE_BASE +
						secure_global_size;
			pt->compat_va_start = KGSL_IOMMU_SECURE_BASE;
			pt->compat_va_end = KGSL_IOMMU_SECURE_END;
			pt->va_start = KGSL_IOMMU_SECURE_BASE +
						secure_global_size;
			pt->va_start = KGSL_IOMMU_SECURE_BASE;
			pt->va_end = KGSL_IOMMU_SECURE_END;
		} else {
			pt->va_start = KGSL_IOMMU_SVM_BASE32;
@@ -1362,8 +1356,6 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
	ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET
			+ (cb_num << KGSL_IOMMU_CB_SHIFT);

	ret = kgsl_map_global_secure_pt_entry(pt);

done:
	if (ret)
		_free_pt(ctx, pt);
@@ -1607,6 +1599,18 @@ static int kgsl_iommu_init(struct kgsl_mmu *mmu)
	kgsl_setup_qdss_desc(device);
	kgsl_setup_qtimer_desc(device);

	if (!mmu->secured)
		goto done;

	mmu->securepagetable = kgsl_mmu_getpagetable(mmu,
				KGSL_MMU_SECURE_PT);
	if (IS_ERR(mmu->securepagetable)) {
		status = PTR_ERR(mmu->securepagetable);
		mmu->securepagetable = NULL;
	} else if (mmu->securepagetable == NULL) {
		status = -ENOMEM;
	}

done:
	if (status)
		kgsl_iommu_close(mmu);
@@ -1688,17 +1692,9 @@ static int _setup_secure_context(struct kgsl_mmu *mmu)
	if (ctx->dev == NULL || !mmu->secured)
		return 0;

	if (mmu->securepagetable == NULL) {
		mmu->securepagetable = kgsl_mmu_getpagetable(mmu,
						KGSL_MMU_SECURE_PT);
		if (IS_ERR(mmu->securepagetable)) {
			ret = PTR_ERR(mmu->securepagetable);
			mmu->securepagetable = NULL;
			return ret;
		} else if (mmu->securepagetable == NULL) {
	if (mmu->securepagetable == NULL)
		return -ENOMEM;
		}
	}

	iommu_pt = mmu->securepagetable->priv;

	ret = _attach_pt(iommu_pt, ctx);
@@ -2501,6 +2497,13 @@ static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
		end = pt->va_end;
	}

	/*
	 * When mapping secure buffers, adjust the start of the va range
	 * to the end of secure global buffers.
	 */
	if (kgsl_memdesc_is_secured(memdesc))
		start += secure_global_size;

	spin_lock(&pagetable->lock);

	addr = _get_unmapped_area(pagetable, start, end, size, align);
+3 −1
Original line number Diff line number Diff line
@@ -174,7 +174,9 @@ int kgsl_mmu_start(struct kgsl_device *device);
struct kgsl_pagetable *kgsl_mmu_getpagetable_ptbase(struct kgsl_mmu *mmu,
						u64 ptbase);

void kgsl_add_global_secure_entry(struct kgsl_device *device,
int kgsl_iommu_map_global_secure_pt_entry(struct kgsl_device *device,
					struct kgsl_memdesc *memdesc);
void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_device *device,
					struct kgsl_memdesc *memdesc);
void kgsl_print_global_pt_entries(struct seq_file *s);
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);