Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4033f562 authored by Kyle Piefer; committed by Gerrit - the friendly Code Review server
Browse files

msm: kgsl: Update idle check for new GMU firmware



The new GMU firmware uses the same value to represent
the SPTPRAC PC and IFPC idle levels. If we are doing
IFPC, GX must be off for IFPC to be complete.
Update our idle check to take these criteria into account.
We must also check for idle before finishing adreno_stop().
Factor out this check and add it there as well.

CRs-Fixed: 2085877
Change-Id: Idd252aa4c91c640406907424dff39e2f9b6f5b6c
Signed-off-by: Kyle Piefer <kpiefer@codeaurora.org>
parent b09325dc
Loading
Loading
Loading
Loading
+11 −0
Original line number Diff line number Diff line
@@ -1678,6 +1678,7 @@ static void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
static int adreno_stop(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (!test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv))
		return 0;
@@ -1705,6 +1706,16 @@ static int adreno_stop(struct kgsl_device *device)
	/* Save physical performance counter values before GPU power down*/
	adreno_perfcounter_save(adreno_dev);

	/*
	 * Saving perfcounters will use an OOB to put the GMU into
	 * active state. Before continuing, we should wait for the
	 * GMU to return to the lowest idle level. This is
	 * because some idle level transitions require VBIF and MMU.
	 */
	if (gpudev->wait_for_lowest_idle &&
			gpudev->wait_for_lowest_idle(adreno_dev))
		return -EINVAL;

	adreno_vbif_clear_pending_transactions(device);

	kgsl_mmu_stop(&device->mmu);
+1 −0
Original line number Diff line number Diff line
@@ -887,6 +887,7 @@ struct adreno_gpudev {
	int (*rpmh_gpu_pwrctrl)(struct adreno_device *, unsigned int ops,
				unsigned int arg1, unsigned int arg2);
	bool (*hw_isidle)(struct adreno_device *);
	int (*wait_for_lowest_idle)(struct adreno_device *);
	int (*wait_for_gmu_idle)(struct adreno_device *);
	const char *(*iommu_fault_block)(struct adreno_device *adreno_dev,
				unsigned int fsynr1);
+48 −0
Original line number Diff line number Diff line
@@ -1747,6 +1747,53 @@ static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
	return true;
}

static int a6xx_wait_for_lowest_idle(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int reg;
	unsigned long t;

	if (!kgsl_gmu_isenabled(device))
		return 0;

	t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
	while (!time_after(jiffies, t)) {
		adreno_read_gmureg(ADRENO_DEVICE(device),
				ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);

		/* SPTPRAC PC has the same idle level as IFPC */
		if ((reg == gmu->idle_level) ||
				(gmu->idle_level == GPU_HW_SPTP_PC &&
				reg == GPU_HW_IFPC)) {
			/* IFPC is not complete until GX is off */
			if (gmu->idle_level != GPU_HW_IFPC ||
					!gpudev->gx_is_on(adreno_dev))
				return 0;
		}

		/* Wait 100us to reduce unnecessary AHB bus traffic */
		udelay(100);
		cond_resched();
	}

	/* Check one last time */
	adreno_read_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
	if ((reg == gmu->idle_level) ||
			(gmu->idle_level == GPU_HW_SPTP_PC &&
			reg == GPU_HW_IFPC)) {
		if (gmu->idle_level != GPU_HW_IFPC ||
				!gpudev->gx_is_on(adreno_dev))
			return 0;
	}

	dev_err(&gmu->pdev->dev,
			"Timeout waiting for lowest idle level: %d\n", reg);
	return -ETIMEDOUT;
}

static int a6xx_wait_for_gmu_idle(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -3024,6 +3071,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
	.gpu_keepalive = a6xx_gpu_keepalive,
	.rpmh_gpu_pwrctrl = a6xx_rpmh_gpu_pwrctrl,
	.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
	.wait_for_lowest_idle = a6xx_wait_for_lowest_idle,
	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle,
	.iommu_fault_block = a6xx_iommu_fault_block,
	.reset = a6xx_reset,
+5 −26
Original line number Diff line number Diff line
@@ -1479,44 +1479,23 @@ int gmu_start(struct kgsl_device *device)
	return ret;
}

#define GMU_IDLE_TIMEOUT	10 /* ms */

/* Caller shall ensure GPU is ready for SLUMBER */
void gmu_stop(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned long t;
	bool idle = false;
	unsigned int reg;
	bool idle = true;

	if (!test_bit(GMU_CLK_ON, &gmu->flags))
		return;

	t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
	while (!time_after(jiffies, t)) {
		adreno_read_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
		if (reg == device->gmu.idle_level) {
			idle = true;
			break;
		}
		/* Wait 100us to reduce unnecessary AHB bus traffic */
		udelay(100);
		cond_resched();
	}

	/* Double check one last time */
	if (idle == false) {
		adreno_read_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
		if (reg == device->gmu.idle_level)
			idle = true;
	}
	/* Wait for the lowest idle level we requested */
	if (gpudev->wait_for_lowest_idle &&
			gpudev->wait_for_lowest_idle(adreno_dev))
		idle = false;

	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);

	if (!idle || (gpudev->wait_for_gmu_idle &&
			gpudev->wait_for_gmu_idle(adreno_dev))) {
		dev_err(&gmu->pdev->dev, "Stopping GMU before it is idle\n");
+3 −0
Original line number Diff line number Diff line
@@ -56,6 +56,9 @@
#define GPUBUSYIGNAHB		BIT(23)
#define CXGXCPUBUSYIGNAHB	BIT(30)

/* GMU timeouts */
#define GMU_IDLE_TIMEOUT        10 /* ms */

/* Constants for GMU OOBs */
#define OOB_BOOT_OPTION         0
#define OOB_SLUMBER_OPTION      1