Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e7d58ea4 authored by Sravankumar bijili
Browse files

msm: kgsl: Dump always ON counter in adreno_irq_handler()



In an error case where an IRQ is raised while GX is OFF, it is
important to have the always-on counter data in the IRQ handler
in order to analyze the GMU function trace. So add support to
dump the always-on counter value in adreno_irq_handler().

Change-Id: I5c976f52053d65983b1c37b196e800ecc700cf2d
Signed-off-by: Hareesh Gundu <hareeshg@codeaurora.org>
Signed-off-by: Sravankumar bijili <csbijil@codeaurora.org>
parent ed5f8272
Loading
Loading
Loading
Loading
+10 −4
Original line number Diff line number Diff line
@@ -614,6 +614,9 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
	unsigned int status = 0, fence = 0, fence_retries = 0, tmp, int_bit;
	unsigned int shadow_status = 0;
	int i;
	u64 ts, ts1, ts2;

	ts = gmu_core_dev_read_ao_counter(device);

	atomic_inc(&adreno_dev->pending_irq_refcnt);
	/* Ensure this increment is done before the IRQ status is updated */
@@ -640,6 +643,8 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
				&fence);

		while (fence != 0) {
			ts1 =  gmu_core_dev_read_ao_counter(device);

			/* Wait for small time before trying again */
			udelay(1);
			adreno_readreg(adreno_dev,
@@ -647,18 +652,19 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
					&fence);

			if (fence_retries == FENCE_RETRY_MAX && fence != 0) {
				ts2 =  gmu_core_dev_read_ao_counter(device);

				adreno_readreg(adreno_dev,
					ADRENO_REG_GMU_RBBM_INT_UNMASKED_STATUS,
					&shadow_status);

				KGSL_DRV_CRIT_RATELIMIT(device,
					"Status=0x%x Unmasked status=0x%x Mask=0x%x\n",
					"Status=0x%x Unmasked status=0x%x Timestamps:%llx %llx %llx\n",
					shadow_status & irq_params->mask,
					shadow_status, irq_params->mask);
					shadow_status, ts, ts1, ts2);
				adreno_set_gpu_fault(adreno_dev,
						ADRENO_GMU_FAULT);
				adreno_dispatcher_schedule(KGSL_DEVICE
						(adreno_dev));
				adreno_dispatcher_schedule(device);
				goto done;
			}
			fence_retries++;
+1 −25
Original line number Diff line number Diff line
@@ -143,31 +143,6 @@ static inline int timed_poll_check(struct kgsl_device *device,
	return -ETIMEDOUT;
}

/*
 * read_AO_counter() - Returns the 64bit always on counter value
 *
 * @device: Pointer to KGSL device
 *
 * The 64-bit counter is exposed as two 32-bit registers, so the high
 * word is sampled before and after the low word to detect a rollover
 * of the low word between the reads.
 */
static inline uint64_t read_AO_counter(struct kgsl_device *device)
{
	unsigned int lo, hi_before, hi_after;

	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &hi_before);
	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &lo);
	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &hi_after);

	/*
	 * If the high word changed between the two reads, the low word
	 * rolled over in between; re-read it so it pairs with the newer
	 * high word.
	 */
	if (hi_before != hi_after)
		gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L,
				&lo);

	return ((uint64_t) hi_after << 32) | (uint64_t) lo;
}

/* Preemption functions */
void a6xx_preemption_trigger(struct adreno_device *adreno_dev);
void a6xx_preemption_schedule(struct adreno_device *adreno_dev);
@@ -198,4 +173,5 @@ void a6xx_gmu_sptprac_disable(struct adreno_device *adreno_dev);
bool a6xx_gmu_sptprac_is_on(struct adreno_device *adreno_dev);
size_t a6xx_snapshot_preemption(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv);
u64 a6xx_gmu_read_ao_counter(struct kgsl_device *device);
#endif
+31 −5
Original line number Diff line number Diff line
@@ -901,7 +901,7 @@ static int a6xx_gmu_wait_for_lowest_idle(struct adreno_device *adreno_dev)
	if (!gmu_core_gpmu_isenabled(device))
		return 0;

	ts1 = read_AO_counter(device);
	ts1 = a6xx_gmu_read_ao_counter(device);

	t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
	do {
@@ -916,7 +916,7 @@ static int a6xx_gmu_wait_for_lowest_idle(struct adreno_device *adreno_dev)
		usleep_range(10, 100);
	} while (!time_after(jiffies, t));

	ts2 = read_AO_counter(device);
	ts2 = a6xx_gmu_read_ao_counter(device);
	/* Check one last time */

	gmu_core_regread(device, A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
@@ -925,7 +925,7 @@ static int a6xx_gmu_wait_for_lowest_idle(struct adreno_device *adreno_dev)
	if (idle_transition_complete(gmu->idle_level, reg, reg1))
		return 0;

	ts3 = read_AO_counter(device);
	ts3 = a6xx_gmu_read_ao_counter(device);

	/* Collect abort data to help with debugging */
	gmu_core_regread(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg2);
@@ -966,14 +966,14 @@ static int a6xx_gmu_wait_for_idle(struct adreno_device *adreno_dev)
	unsigned int status2;
	uint64_t ts1;

	ts1 = read_AO_counter(device);
	ts1 = a6xx_gmu_read_ao_counter(device);
	if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
			0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
		gmu_core_regread(device,
				A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2, &status2);
		dev_err(&gmu->pdev->dev,
				"GMU not idling: status2=0x%x %llx %llx\n",
				status2, ts1, read_AO_counter(device));
				status2, ts1, a6xx_gmu_read_ao_counter(device));
		return -ETIMEDOUT;
	}

@@ -1634,6 +1634,31 @@ static bool a6xx_gmu_is_initialized(struct adreno_device *adreno_dev)
	return (val == PDC_ENABLE_REG_VALUE);
}

/*
 * a6xx_gmu_read_ao_counter() - Returns the 64bit always on counter value
 *
 * @device: Pointer to KGSL device
 *
 * Reads the split 32-bit halves of the always-on counter. The high
 * word is read twice, bracketing the low-word read, so a rollover of
 * the low word between the reads can be detected and corrected.
 */
u64 a6xx_gmu_read_ao_counter(struct kgsl_device *device)
{
	unsigned int low, high_first, high_second;

	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H,
			&high_first);
	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &low);
	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H,
			&high_second);

	/*
	 * A stable high word means no rollover occurred; otherwise the
	 * low word must be sampled again to match the newer high word.
	 */
	if (high_first != high_second)
		gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L,
				&low);

	return ((uint64_t) high_second << 32) | (uint64_t) low;
}

struct gmu_dev_ops adreno_a6xx_gmudev = {
	.load_firmware = a6xx_gmu_load_firmware,
	.oob_set = a6xx_gmu_oob_set,
@@ -1651,6 +1676,7 @@ struct gmu_dev_ops adreno_a6xx_gmudev = {
	.snapshot = a6xx_gmu_snapshot,
	.wait_for_active_transition = a6xx_gmu_wait_for_active_transition,
	.is_initialized = a6xx_gmu_is_initialized,
	.read_ao_counter = a6xx_gmu_read_ao_counter,
	.gmu2host_intr_mask = HFI_IRQ_MASK,
	.gmu_ao_intr_mask = GMU_AO_INT_MASK,
};
+4 −3
Original line number Diff line number Diff line
@@ -290,7 +290,7 @@ static int a6xx_rgmu_wait_for_lowest_idle(struct adreno_device *adreno_dev)
			rgmu->idle_level != GPU_HW_IFPC)
		return 0;

	ts1 = read_AO_counter(device);
	ts1 = a6xx_gmu_read_ao_counter(device);

	t = jiffies + msecs_to_jiffies(RGMU_IDLE_TIMEOUT);
	do {
@@ -304,7 +304,7 @@ static int a6xx_rgmu_wait_for_lowest_idle(struct adreno_device *adreno_dev)
		usleep_range(10, 100);
	} while (!time_after(jiffies, t));

	ts2 = read_AO_counter(device);
	ts2 = a6xx_gmu_read_ao_counter(device);

	/* Do one last read incase it succeeds */
	gmu_core_regread(device,
@@ -313,7 +313,7 @@ static int a6xx_rgmu_wait_for_lowest_idle(struct adreno_device *adreno_dev)
	if (reg[0] & GX_GDSC_POWER_OFF)
		return 0;

	ts3 = read_AO_counter(device);
	ts3 = a6xx_gmu_read_ao_counter(device);

	/* Collect abort data to help with debugging */
	gmu_core_regread(device, A6XX_RGMU_CX_PCC_DEBUG, &reg[1]);
@@ -603,6 +603,7 @@ struct gmu_dev_ops adreno_a6xx_rgmudev = {
	.ifpc_show = a6xx_rgmu_ifpc_show,
	.snapshot = a6xx_rgmu_snapshot,
	.halt_execution = a6xx_rgmu_halt_execution,
	.read_ao_counter = a6xx_gmu_read_ao_counter,
	.gmu2host_intr_mask = RGMU_OOB_IRQ_MASK,
	.gmu_ao_intr_mask = RGMU_AO_IRQ_MASK,
};
+9 −0
Original line number Diff line number Diff line
@@ -285,3 +285,12 @@ bool gmu_core_is_initialized(struct kgsl_device *device)
	return false;
}

/*
 * gmu_core_dev_read_ao_counter() - Dispatch to the device-specific
 * always-on counter read, if one is registered.
 *
 * @device: Pointer to KGSL device
 *
 * Returns the counter value from the device ops, or 0 when no ops
 * structure or no read_ao_counter callback is present.
 */
u64 gmu_core_dev_read_ao_counter(struct kgsl_device *device)
{
	struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device);

	if (!ops || !ops->read_ao_counter)
		return 0;

	return ops->read_ao_counter(device);
}
Loading