Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3d6fd426, authored by qctecmdr, committed by Gerrit (the friendly Code Review server)
Browse files

Merge "msm: Fix UCHE to GMEM VA align and SVM base addr"

parents 540d7a3c 8fe87370
Loading
Loading
Loading
Loading
+11 −1
Original line number Diff line number Diff line
@@ -834,7 +834,10 @@ adreno_identify_gpu(struct adreno_device *adreno_dev)
	 */

	adreno_dev->gmem_size = adreno_dev->gpucore->gmem_size;
	adreno_dev->uche_gmem_base = ALIGN(adreno_dev->gmem_size, SZ_4K);

	/* UCHE to GMEM base address requires 1MB alignment */
	adreno_dev->uche_gmem_base = ALIGN(adreno_dev->gmem_size, SZ_1M);

	/*
	 * Initialize uninitialzed gpu registers, only needs to be done once
	 * Make all offsets that are not initialized to ADRENO_REG_UNUSED
@@ -1379,6 +1382,13 @@ static int adreno_probe(struct platform_device *pdev)
	/* Default to 4K alignment (in other words, no additional padding) */
	device->mmu.va_padding = PAGE_SIZE;

	/*
	 * SVM start va can be calculated based on UCHE GMEM size.
	 * UCHE_GMEM_MAX < (SP LOCAL & PRIVATE) < MMU SVA
	 */
	device->mmu.svm_base32 = KGSL_IOMMU_SVM_BASE32 +
		((ALIGN(adreno_dev->gmem_size, SZ_1M) - SZ_1M) << 1);

	if (adreno_dev->gpucore->va_padding) {
		device->mmu.features |= KGSL_MMU_PAD_VA;
		device->mmu.va_padding = adreno_dev->gpucore->va_padding;
+5 −5
Original line number Diff line number Diff line
@@ -1081,7 +1081,7 @@ static void setup_64bit_pagetable(struct kgsl_mmu *mmu,
		pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu);
		pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
	} else {
		pt->compat_va_start = KGSL_IOMMU_SVM_BASE32;
		pt->compat_va_start = mmu->svm_base32;
		pt->compat_va_end = KGSL_IOMMU_SECURE_BASE(mmu);
		pt->va_start = KGSL_IOMMU_VA_BASE64;
		pt->va_end = KGSL_IOMMU_VA_END64;
@@ -1090,7 +1090,7 @@ static void setup_64bit_pagetable(struct kgsl_mmu *mmu,
	if (pagetable->name != KGSL_MMU_GLOBAL_PT &&
		pagetable->name != KGSL_MMU_SECURE_PT) {
		if (kgsl_is_compat_task()) {
			pt->svm_start = KGSL_IOMMU_SVM_BASE32;
			pt->svm_start = mmu->svm_base32;
			pt->svm_end = KGSL_IOMMU_SECURE_BASE(mmu);
		} else {
			pt->svm_start = KGSL_IOMMU_SVM_BASE64;
@@ -1110,13 +1110,13 @@ static void setup_32bit_pagetable(struct kgsl_mmu *mmu,
			pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu);
			pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
		} else {
			pt->va_start = KGSL_IOMMU_SVM_BASE32;
			pt->va_start = mmu->svm_base32;
			pt->va_end = KGSL_IOMMU_SECURE_BASE(mmu);
			pt->compat_va_start = pt->va_start;
			pt->compat_va_end = pt->va_end;
		}
	} else {
		pt->va_start = KGSL_IOMMU_SVM_BASE32;
		pt->va_start = mmu->svm_base32;
		pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);
		pt->compat_va_start = pt->va_start;
		pt->compat_va_end = pt->va_end;
@@ -1124,7 +1124,7 @@ static void setup_32bit_pagetable(struct kgsl_mmu *mmu,

	if (pagetable->name != KGSL_MMU_GLOBAL_PT &&
		pagetable->name != KGSL_MMU_SECURE_PT) {
		pt->svm_start = KGSL_IOMMU_SVM_BASE32;
		pt->svm_start = mmu->svm_base32;
		pt->svm_end = KGSL_IOMMU_SVM_END32;
	}
}
+3 −1
Original line number Diff line number Diff line
/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -154,6 +154,7 @@ struct kgsl_mmu_pt_ops {
 * @feature: Static list of MMU features
 * @secure_aligned_mask: Mask that secure buffers need to be aligned to
 * @va_padding: Size to pad VA mappings to
 * @svm_base32: MMU 32bit VA start address
 * @priv: Union of sub-device specific members
 */
struct kgsl_mmu {
@@ -166,6 +167,7 @@ struct kgsl_mmu {
	unsigned long features;
	unsigned int secure_align_mask;
	uint64_t va_padding;
	unsigned int svm_base32;
	union {
		struct kgsl_iommu iommu;
	} priv;