Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 59a132a8 authored by Jordan Crouse, committed by Tarun Karra
Browse files

msm: kgsl: Enable 64 bit map if everybody is willing



All the pieces have now come into place to allow for 64 bit mappings.
The only thing left to do is to enable it, so make it so.

Change-Id: Ic0dedbad3615a4e8dbef53f64adae6e28b610251
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
parent 66f4f85a
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -103,6 +103,9 @@ Optional Properties:
- qcom,pm-qos-wakeup-latency:
				Similar to the above. Driver votes against deep low
				power modes right before GPU wakes up from sleep.
- qcom,force-32bit:
				Force the GPU to use 32 bit data sizes even if
				it is capable of doing 64 bit.

The following properties are optional as collecting data via coresight might
not be supported for every chipset. The documentation for coresight
+24 −4
Original line number Diff line number Diff line
@@ -290,6 +290,9 @@ static int kgsl_iommu_pdev_probe(struct platform_device *pdev)
	if (of_property_read_bool(pdev->dev.of_node, "qcom,hyp_secure_alloc"))
		data->features |= KGSL_MMU_HYP_SECURE_ALLOC;

	if (of_property_read_bool(pdev->dev.of_node, "qcom,force-32bit"))
		data->features |= KGSL_MMU_FORCE_32BIT;

	result = of_platform_populate(pdev->dev.of_node, iommu_match_table,
				NULL, &pdev->dev);
	if (!result)
@@ -1090,6 +1093,10 @@ int adreno_probe(struct platform_device *pdev)
	/* Identify the specific GPU */
	adreno_identify_gpu(adreno_dev);

	/* Bro, do you even 64 bit? */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_64BIT))
		device->mmu.features |= KGSL_MMU_64BIT;

	status = kgsl_device_platform_probe(device);
	if (status) {
		device->pdev = NULL;
@@ -1385,12 +1392,25 @@ static int _adreno_start(struct adreno_device *adreno_dev)
				ADRENO_REG_RBBM_SECVID_TRUST_CONFIG, 0x2);
		adreno_writereg(adreno_dev,
				ADRENO_REG_RBBM_SECVID_TSB_CONTROL, 0x0);

		if (ADRENO_FEATURE(adreno_dev, ADRENO_64BIT) &&
			MMU_FEATURE(&device->mmu, KGSL_MMU_64BIT)) {
			adreno_writereg64(adreno_dev,
				ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
				ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
				KGSL_IOMMU_SECURE_BASE64);
			adreno_writereg(adreno_dev,
				ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
				KGSL_IOMMU_SECURE_SIZE64);
		} else {
			adreno_writereg64(adreno_dev,
				ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
				KGSL_IOMMU_SECURE_MEM_BASE);
				ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
				KGSL_IOMMU_SECURE_BASE32);
			adreno_writereg(adreno_dev,
				ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
				KGSL_IOMMU_SECURE_MEM_SIZE);
				KGSL_IOMMU_SECURE_SIZE32);
		}
	}

	status = adreno_ocmem_malloc(adreno_dev);
+1 −0
Original line number Diff line number Diff line
@@ -469,6 +469,7 @@ enum adreno_regs {
	ADRENO_REG_RBBM_SECVID_TRUST_CONFIG,
	ADRENO_REG_RBBM_SECVID_TSB_CONTROL,
	ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
	ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
	ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
	ADRENO_REG_VBIF_XIN_HALT_CTRL0,
	ADRENO_REG_VBIF_XIN_HALT_CTRL1,
+2 −0
Original line number Diff line number Diff line
@@ -2280,6 +2280,8 @@ static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
				A5XX_RBBM_SECVID_TSB_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
				A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
				A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
				A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
+12 −11
Original line number Diff line number Diff line
@@ -2423,8 +2423,7 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
			| KGSL_MEMALIGN_MASK
			| KGSL_MEMFLAGS_USE_CPU_MAP
			| KGSL_MEMFLAGS_SECURE;

	entry->memdesc.flags = param->flags;
	entry->memdesc.flags = param->flags | KGSL_MEMFLAGS_FORCE_32BIT;

	if (!kgsl_mmu_use_cpu_map(&dev_priv->device->mmu))
		entry->memdesc.flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
@@ -2836,7 +2835,8 @@ static struct kgsl_mem_entry *gpumem_alloc_entry(
		| KGSL_MEMTYPE_MASK
		| KGSL_MEMALIGN_MASK
		| KGSL_MEMFLAGS_USE_CPU_MAP
		| KGSL_MEMFLAGS_SECURE;
		| KGSL_MEMFLAGS_SECURE
		| KGSL_MEMFLAGS_FORCE_32BIT;

	/* Turn off SVM if the system doesn't support it */
	if (!kgsl_mmu_use_cpu_map(&dev_priv->device->mmu))
@@ -2850,10 +2850,10 @@ static struct kgsl_mem_entry *gpumem_alloc_entry(
		return ERR_PTR(-EOPNOTSUPP);
	}

	/* SVM and secure memory are not friends */
	if ((flags & KGSL_MEMFLAGS_SECURE) &&
		(flags & KGSL_MEMFLAGS_USE_CPU_MAP))
		flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
	/* Secure memory disables advanced addressing modes */
	if (flags & KGSL_MEMFLAGS_SECURE)
		flags &= ~(KGSL_MEMFLAGS_USE_CPU_MAP
			   | KGSL_MEMFLAGS_FORCE_32BIT);

	/* Cap the alignment bits to the highest number we can handle */
	align = MEMFLAGS(flags, KGSL_MEMALIGN_MASK, KGSL_MEMALIGN_SHIFT);
@@ -2917,9 +2917,6 @@ long kgsl_ioctl_gpuobj_alloc(struct kgsl_device_private *dev_priv,
	struct kgsl_gpuobj_alloc *param = data;
	struct kgsl_mem_entry *entry;

	/* All allocations should use SVM if it is available */
	param->flags |= KGSL_MEMFLAGS_USE_CPU_MAP;

	entry = gpumem_alloc_entry(dev_priv, param->size,
		param->va_len, param->flags);

@@ -2943,6 +2940,7 @@ long kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,

	/* Legacy functions doesn't support these advanced features */
	flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
	flags |= KGSL_MEMFLAGS_FORCE_32BIT;

	entry = gpumem_alloc_entry(dev_priv, (uint64_t) param->size,
		(uint64_t) param->size, flags);
@@ -2964,6 +2962,8 @@ long kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv,
	struct kgsl_mem_entry *entry;
	uint64_t flags = param->flags;

	flags |= KGSL_MEMFLAGS_FORCE_32BIT;

	entry = gpumem_alloc_entry(dev_priv, (uint64_t) param->size,
		(uint64_t) param->mmapsize, flags);

@@ -3362,7 +3362,8 @@ static unsigned long _get_svm_area(struct kgsl_process_private *private,
		align = SZ_4K;

	/* get the GPU pagetable's SVM range */
	if (kgsl_mmu_svm_range(private->pagetable, &start, &end))
	if (kgsl_mmu_svm_range(private->pagetable, &start, &end,
				entry->memdesc.flags))
		return -ERANGE;

	/* now clamp the range based on the CPU's requirements */
Loading