
Commit f97e63f7 authored by Sravankumar bijili

msm: kgsl: Trigger recovery on gmu fenced write failure



In the preemption trigger sequence, if any of the GMU fenced
register writes fails, mark it as a GMU fault so that recovery
can be triggered on the fenced write failure.

Change-Id: Ic3566bcae717d1a90fa8b3da1b644ca6470f488f
Signed-off-by: Hareesh Gundu <hareeshg@codeaurora.org>
Signed-off-by: Sravankumar bijili <csbijil@codeaurora.org>
parent b39b9019
+47 −31
@@ -319,52 +319,53 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev)
 	 * Fenced writes on this path will make sure the GPU is woken up
 	 * in case it was power collapsed by the GMU.
 	 */
-	adreno_gmu_fenced_write(adreno_dev,
+	if (adreno_gmu_fenced_write(adreno_dev,
 		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
 		lower_32_bits(next->preemption_desc.gpuaddr),
-		FENCE_STATUS_WRITEDROPPED1_MASK);
+		FENCE_STATUS_WRITEDROPPED1_MASK))
+		goto err;
+
+	/*
+	 * Above fence writes will make sure GMU comes out of
+	 * IFPC state if its was in IFPC state but it doesn't
+	 * guarantee that GMU FW actually moved to ACTIVE state
+	 * i.e. wake-up from IFPC is complete.
+	 * Wait for GMU to move to ACTIVE state before triggering
+	 * preemption. This is require to make sure CP doesn't
+	 * interrupt GMU during wake-up from IFPC.
+	 */
+	if (gmu_core_dev_wait_for_active_transition(device))
+		goto err;
 
-	adreno_gmu_fenced_write(adreno_dev,
+	if (adreno_gmu_fenced_write(adreno_dev,
 		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
 		upper_32_bits(next->preemption_desc.gpuaddr),
-		FENCE_STATUS_WRITEDROPPED1_MASK);
+		FENCE_STATUS_WRITEDROPPED1_MASK))
+		goto err;
 
-	adreno_gmu_fenced_write(adreno_dev,
+	if (adreno_gmu_fenced_write(adreno_dev,
 		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO,
 		lower_32_bits(next->secure_preemption_desc.gpuaddr),
-		FENCE_STATUS_WRITEDROPPED1_MASK);
+		FENCE_STATUS_WRITEDROPPED1_MASK))
+		goto err;
 
-	adreno_gmu_fenced_write(adreno_dev,
+	if (adreno_gmu_fenced_write(adreno_dev,
 		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
 		upper_32_bits(next->secure_preemption_desc.gpuaddr),
-		FENCE_STATUS_WRITEDROPPED1_MASK);
+		FENCE_STATUS_WRITEDROPPED1_MASK))
+		goto err;
 
-	adreno_gmu_fenced_write(adreno_dev,
+	if (adreno_gmu_fenced_write(adreno_dev,
 		ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
 		lower_32_bits(gpuaddr),
-		FENCE_STATUS_WRITEDROPPED1_MASK);
+		FENCE_STATUS_WRITEDROPPED1_MASK))
+		goto err;
 
-	adreno_gmu_fenced_write(adreno_dev,
+	if (adreno_gmu_fenced_write(adreno_dev,
 		ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
 		upper_32_bits(gpuaddr),
-		FENCE_STATUS_WRITEDROPPED1_MASK);
-
-	/*
-	 * Above fence writes will make sure GMU comes out of
-	 * IFPC state if its was in IFPC state but it doesn't
-	 * guarantee that GMU FW actually moved to ACTIVE state
-	 * i.e. wake-up from IFPC is complete.
-	 * Wait for GMU to move to ACTIVE state before triggering
-	 * preemption. This is require to make sure CP doesn't
-	 * interrupt GMU during wake-up from IFPC.
-	 */
-	if (gmu_core_dev_wait_for_active_transition(device)) {
-		adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
-
-		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
-		adreno_dispatcher_schedule(device);
-		return;
-	}
+		FENCE_STATUS_WRITEDROPPED1_MASK))
+		goto err;
 
 	adreno_dev->next_rb = next;
 
@@ -378,8 +379,23 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev)
 	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED);
 
 	/* Trigger the preemption */
-	adreno_gmu_fenced_write(adreno_dev, ADRENO_REG_CP_PREEMPT, cntl,
-		FENCE_STATUS_WRITEDROPPED1_MASK);
+	if (adreno_gmu_fenced_write(adreno_dev, ADRENO_REG_CP_PREEMPT, cntl,
+					FENCE_STATUS_WRITEDROPPED1_MASK)) {
+		adreno_dev->next_rb = NULL;
+		del_timer(&adreno_dev->preempt.timer);
+		goto err;
+	}
+
+	return;
+err:
+
+	/* If fenced write fails, set the fault and trigger recovery */
+	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+	adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
+	adreno_dispatcher_schedule(device);
+	/* Clear the keep alive */
+	if (gmu_core_isenabled(device))
+		gmu_core_regrmw(device, A6XX_GMU_AO_SPARE_CNTL, 0x2, 0x0);
 }
 
 void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit)
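
In short, the change gives every fenced register write on the trigger path a checked return value and funnels all failures into a single err label that flags ADRENO_GMU_FAULT and schedules the dispatcher so recovery runs. Below is a minimal standalone C sketch of that control-flow shape; every function, register name, and value in it is an illustrative stand-in, not the real kgsl driver API.

/*
 * Toy model of this commit's error handling: check each fenced write
 * and funnel any failure into one "err" label that records a GMU
 * fault and kicks off recovery. All functions here are hypothetical
 * stubs standing in for driver calls.
 */
#include <stdio.h>

/* Stub: returns nonzero if the fenced write was dropped by the GMU. */
static int fenced_write(const char *reg, unsigned int val)
{
	printf("write %s = 0x%x\n", reg, val);
	return 0;
}

/* Stub: returns nonzero if the GMU never reached ACTIVE after wake-up. */
static int wait_for_active(void)
{
	return 0;
}

static void trigger_preemption(void)
{
	if (fenced_write("RESTORE_ADDR_LO", 0x1000))
		goto err;

	/*
	 * Fenced writes wake the GMU but do not guarantee it reached
	 * ACTIVE; wait before continuing, as the patched code does.
	 */
	if (wait_for_active())
		goto err;

	if (fenced_write("RESTORE_ADDR_HI", 0x0))
		goto err;

	if (fenced_write("CP_PREEMPT", 0x1))
		goto err;

	puts("preemption triggered");
	return;

err:
	/* Mark the GMU fault and let the dispatcher drive recovery. */
	puts("GMU fault: scheduling recovery");
}

int main(void)
{
	trigger_preemption();
	return 0;
}

Centralizing the unwind this way mirrors the diff: on any failure the real code resets the preempt state, sets the fault, schedules the dispatcher, and clears the GMU keep-alive vote in one place, with the CP_PREEMPT branch additionally clearing next_rb and its timer before jumping there.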