Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b3e31a5d authored by Tarun Karra
Browse files

msm: kgsl: Implement mutex between SMMU operations and GPU power collapse



Implement mutex between SMMU operations and GPU power collapse
to prevent a glitch between GPU and SMMU on 8996v2. This mutex
prevents a condition where GPU vreg gets turned on/off at the
same time as SMMU is turning on/off GPU CX vreg.

Change-Id: I3011f9850ac0eb393d4ba6765c5803ea380013fd
Signed-off-by: Tarun Karra <tkarra@codeaurora.org>
parent 85a13391
Loading
Loading
Loading
Loading
+6 −14
Original line number Original line Diff line number Diff line
@@ -1341,9 +1341,8 @@ static int _adreno_start(struct adreno_device *adreno_dev)
{
{
	struct kgsl_device *device = &adreno_dev->dev;
	struct kgsl_device *device = &adreno_dev->dev;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int i, status = -EINVAL;
	int status = -EINVAL;
	unsigned int state = device->state;
	unsigned int state = device->state;
	unsigned int regulator_left_on = 0;
	unsigned int pmqos_wakeup_vote = device->pwrctrl.pm_qos_wakeup_latency;
	unsigned int pmqos_wakeup_vote = device->pwrctrl.pm_qos_wakeup_latency;
	unsigned int pmqos_active_vote = device->pwrctrl.pm_qos_active_latency;
	unsigned int pmqos_active_vote = device->pwrctrl.pm_qos_active_latency;


@@ -1355,27 +1354,20 @@ static int _adreno_start(struct adreno_device *adreno_dev)


	kgsl_cffdump_open(device);
	kgsl_cffdump_open(device);


	for (i = 0; i < KGSL_MAX_REGULATORS; i++) {
		if (device->pwrctrl.gpu_reg[i] &&
			regulator_is_enabled(device->pwrctrl.gpu_reg[i])) {
			regulator_left_on = 1;
			break;
		}
	}

	/* Clear any GPU faults that might have been left over */
	adreno_clear_gpu_fault(adreno_dev);

	/* Put the GPU in a responsive state */
	/* Put the GPU in a responsive state */
	device->regulator_left_on = false;
	status = kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
	status = kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
	if (status)
	if (status)
		goto error_pwr_off;
		goto error_pwr_off;


	/* Clear any GPU faults that might have been left over */
	adreno_clear_gpu_fault(adreno_dev);

	/* Set the bit to indicate that we've just powered on */
	/* Set the bit to indicate that we've just powered on */
	set_bit(ADRENO_DEVICE_PWRON, &adreno_dev->priv);
	set_bit(ADRENO_DEVICE_PWRON, &adreno_dev->priv);


	/* Soft reset the GPU if a regulator is stuck on*/
	/* Soft reset the GPU if a regulator is stuck on*/
	if (regulator_left_on)
	if (device->regulator_left_on)
		_soft_reset(adreno_dev);
		_soft_reset(adreno_dev);


	status = kgsl_mmu_start(device);
	status = kgsl_mmu_start(device);
+3 −0
Original line number Original line Diff line number Diff line
@@ -250,6 +250,7 @@ struct kgsl_device {
	int open_count;
	int open_count;


	struct mutex mutex;
	struct mutex mutex;
	struct mutex mutex_pc_smmu;
	uint32_t state;
	uint32_t state;
	uint32_t requested_state;
	uint32_t requested_state;


@@ -289,6 +290,7 @@ struct kgsl_device {
	struct workqueue_struct *events_wq;
	struct workqueue_struct *events_wq;


	struct device *busmondev; /* pseudo dev for GPU BW voting governor */
	struct device *busmondev; /* pseudo dev for GPU BW voting governor */
	bool regulator_left_on;
};
};




@@ -303,6 +305,7 @@ struct kgsl_device {
	.wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).wait_queue),\
	.wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).wait_queue),\
	.active_cnt_wq = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).active_cnt_wq),\
	.active_cnt_wq = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).active_cnt_wq),\
	.mutex = __MUTEX_INITIALIZER((_dev).mutex),\
	.mutex = __MUTEX_INITIALIZER((_dev).mutex),\
	.mutex_pc_smmu = __MUTEX_INITIALIZER((_dev).mutex_pc_smmu),\
	.state = KGSL_STATE_NONE,\
	.state = KGSL_STATE_NONE,\
	.ver_major = DRIVER_VERSION_MAJOR,\
	.ver_major = DRIVER_VERSION_MAJOR,\
	.ver_minor = DRIVER_VERSION_MINOR
	.ver_minor = DRIVER_VERSION_MINOR
+19 −3
Original line number Original line Diff line number Diff line
@@ -1109,13 +1109,18 @@ kgsl_iommu_unmap(struct kgsl_pagetable *pt,
		mutex_lock(&device->mutex);
		mutex_lock(&device->mutex);
		ret = kgsl_active_count_get(device);
		ret = kgsl_active_count_get(device);
		if (!ret) {
		if (!ret) {
			mutex_lock(&device->mutex_pc_smmu);
			unmapped = iommu_unmap(iommu_pt->domain, gpuaddr,
			unmapped = iommu_unmap(iommu_pt->domain, gpuaddr,
					range);
					range);
			mutex_unlock(&device->mutex_pc_smmu);
			kgsl_active_count_put(device);
			kgsl_active_count_put(device);
		}
		}
		mutex_unlock(&device->mutex);
		mutex_unlock(&device->mutex);
	} else
	} else {
		mutex_lock(&device->mutex_pc_smmu);
		unmapped = iommu_unmap(iommu_pt->domain, gpuaddr, range);
		unmapped = iommu_unmap(iommu_pt->domain, gpuaddr, range);
		mutex_unlock(&device->mutex_pc_smmu);
	}
	if (unmapped != range) {
	if (unmapped != range) {
		KGSL_CORE_ERR(
		KGSL_CORE_ERR(
			"iommu_unmap(%p, %llx, %lld) failed with unmapped size: %zd\n",
			"iommu_unmap(%p, %llx, %lld) failed with unmapped size: %zd\n",
@@ -1174,9 +1179,11 @@ int _iommu_add_guard_page(struct kgsl_pagetable *pt,
			physaddr = kgsl_secure_guard_page_memdesc.physaddr;
			physaddr = kgsl_secure_guard_page_memdesc.physaddr;
		}
		}


		mutex_lock(&pt->mmu->device->mutex_pc_smmu);
		ret = iommu_map(iommu_pt->domain, gpuaddr, physaddr,
		ret = iommu_map(iommu_pt->domain, gpuaddr, physaddr,
				kgsl_memdesc_guard_page_size(memdesc),
				kgsl_memdesc_guard_page_size(memdesc),
				protflags & ~IOMMU_WRITE);
				protflags & ~IOMMU_WRITE);
		mutex_unlock(&pt->mmu->device->mutex_pc_smmu);
		if (ret) {
		if (ret) {
			KGSL_CORE_ERR(
			KGSL_CORE_ERR(
			"iommu_map(%p, addr %016llX, flags %x) err: %d\n",
			"iommu_map(%p, addr %016llX, flags %x) err: %d\n",
@@ -1224,15 +1231,21 @@ kgsl_iommu_map(struct kgsl_pagetable *pt,
		mutex_lock(&device->mutex);
		mutex_lock(&device->mutex);
		ret = kgsl_active_count_get(device);
		ret = kgsl_active_count_get(device);
		if (!ret) {
		if (!ret) {
			mutex_lock(&device->mutex_pc_smmu);
			mapped = iommu_map_sg(iommu_pt->domain, addr,
			mapped = iommu_map_sg(iommu_pt->domain, addr,
					memdesc->sgt->sgl, memdesc->sgt->nents,
					memdesc->sgt->sgl, memdesc->sgt->nents,
					flags);
					flags);
			mutex_unlock(&device->mutex_pc_smmu);
			kgsl_active_count_put(device);
			kgsl_active_count_put(device);
		}
		}
		mutex_unlock(&device->mutex);
		mutex_unlock(&device->mutex);
	} else
	} else {
		mutex_lock(&device->mutex_pc_smmu);
		mapped = iommu_map_sg(iommu_pt->domain, addr,
		mapped = iommu_map_sg(iommu_pt->domain, addr,
				memdesc->sgt->sgl, memdesc->sgt->nents, flags);
				memdesc->sgt->sgl, memdesc->sgt->nents, flags);
		mutex_unlock(&device->mutex_pc_smmu);
	}



	if (mapped != size) {
	if (mapped != size) {
		KGSL_CORE_ERR("iommu_map_sg(%p, %016llX, %lld, %x) err: %zd\n",
		KGSL_CORE_ERR("iommu_map_sg(%p, %016llX, %lld, %x) err: %zd\n",
@@ -1242,9 +1255,12 @@ kgsl_iommu_map(struct kgsl_pagetable *pt,
	}
	}


	ret = _iommu_add_guard_page(pt, memdesc, addr + size, flags);
	ret = _iommu_add_guard_page(pt, memdesc, addr + size, flags);
	if (ret)
	if (ret) {
		/* cleanup the partial mapping */
		/* cleanup the partial mapping */
		mutex_lock(&device->mutex_pc_smmu);
		iommu_unmap(iommu_pt->domain, addr, size);
		iommu_unmap(iommu_pt->domain, addr, size);
		mutex_unlock(&device->mutex_pc_smmu);
	}


	/*
	/*
	 *  IOMMU V1 BFBs pre-fetch data beyond what is being used by the core.
	 *  IOMMU V1 BFBs pre-fetch data beyond what is being used by the core.
+9 −0
Original line number Original line Diff line number Diff line
@@ -1286,15 +1286,21 @@ static int kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			trace_kgsl_rail(device, state);
			mutex_lock(&device->mutex_pc_smmu);
			for (i = KGSL_MAX_REGULATORS - 1; i >= 0; i--) {
			for (i = KGSL_MAX_REGULATORS - 1; i >= 0; i--) {
				if (pwr->gpu_reg[i])
				if (pwr->gpu_reg[i])
					regulator_disable(pwr->gpu_reg[i]);
					regulator_disable(pwr->gpu_reg[i]);
			}
			}
			mutex_unlock(&device->mutex_pc_smmu);
		}
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			&pwr->power_flags)) {
			mutex_lock(&device->mutex_pc_smmu);
			for (i = 0; i < KGSL_MAX_REGULATORS; i++) {
			for (i = 0; i < KGSL_MAX_REGULATORS; i++) {
				if (regulator_is_enabled(
				    device->pwrctrl.gpu_reg[i]))
					device->regulator_left_on = true;
				if (pwr->gpu_reg[i])
				if (pwr->gpu_reg[i])
					status = regulator_enable(
					status = regulator_enable(
							pwr->gpu_reg[i]);
							pwr->gpu_reg[i]);
@@ -1306,13 +1312,16 @@ static int kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
					break;
					break;
				}
				}
			}
			}
			mutex_unlock(&device->mutex_pc_smmu);


			if (status) {
			if (status) {
				mutex_lock(&device->mutex_pc_smmu);
				for (j = i - 1; j >= 0; j--) {
				for (j = i - 1; j >= 0; j--) {
					if (pwr->gpu_reg[j])
					if (pwr->gpu_reg[j])
						regulator_disable(
						regulator_disable(
							pwr->gpu_reg[j]);
							pwr->gpu_reg[j]);
				}
				}
				mutex_unlock(&device->mutex_pc_smmu);
				clear_bit(KGSL_PWRFLAGS_POWER_ON,
				clear_bit(KGSL_PWRFLAGS_POWER_ON,
					&pwr->power_flags);
					&pwr->power_flags);
			} else
			} else