Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8f4f0611 authored by Kyle Yan's avatar Kyle Yan Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: Move hardware idle detection into hardware specific driver." into msm-4.9

parents f95e8973 62d5cecf
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -676,7 +676,7 @@
#define A6XX_GMU_HOST_INTERRUPT_CLR		0x23B04
#define A6XX_GMU_HOST_INTERRUPT_STATUS		0x23B05
#define A6XX_GMU_HOST_INTERRUPT_MASK		0x23B06
#define A6XX_GMU_GPU_CX_BUSY_STATUS		0x23B0C
#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS	0x23B0C
#define A6XX_GMU_AHB_FENCE_STATUS		0x23B13
#define A6XX_GMU_RBBM_INT_UNMASKED_STATUS	0x23B15
#define A6XX_GMU_AO_SPARE_CNTL			0x23B16
+5 −0
Original line number Diff line number Diff line
@@ -2243,6 +2243,11 @@ bool adreno_hw_isidle(struct adreno_device *adreno_dev)
{
	const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
	unsigned int reg_rbbm_status;
	struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);

	/* if hw driver implements idle check - use it */
	if (gpudev->hw_isidle)
		return gpudev->hw_isidle(adreno_dev);

	if (adreno_is_a540(adreno_dev))
		/**
+2 −2
Original line number Diff line number Diff line
@@ -852,10 +852,10 @@ struct adreno_gpudev {
				unsigned int clear_mask);
	void (*oob_clear)(struct adreno_device *adreno_dev,
				unsigned int clear_mask);
	bool (*hw_isidle)(struct adreno_device *);
	int (*rpmh_gpu_pwrctrl)(struct adreno_device *, unsigned int ops,
				unsigned int arg1, unsigned int arg2);
	bool (*gmu_isidle)(struct adreno_device *);
	bool (*hw_isidle)(struct adreno_device *);
	int (*wait_for_gmu_idle)(struct adreno_device *);
};

/**
+31 −21
Original line number Diff line number Diff line
@@ -193,6 +193,11 @@ static void a6xx_start(struct adreno_device *adreno_dev)
	unsigned int bit, mal, mode, glbl_inv;
	unsigned int amsbc = 0;

	/* runtime adjust callbacks based on feature sets */
	if (!kgsl_gmu_isenabled(device))
		/* Legacy idle management if gmu is disabled */
		ADRENO_GPU_DEVICE(adreno_dev)->hw_isidle = NULL;

	adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
			ARRAY_SIZE(a6xx_vbif_platforms));
	/*
@@ -925,8 +930,6 @@ static int a6xx_gfx_rail_on(struct kgsl_device *device)
	return ret;
}

#define GMU_POWER_STATE_SLUMBER 15

/*
 * a6xx_notify_slumber() - initiate request to GMU to prepare to slumber
 * @device: Pointer to KGSL device
@@ -959,7 +962,7 @@ static int a6xx_notify_slumber(struct kgsl_device *device)
		dev_err(&gmu->pdev->dev, "OOB set for slumber timed out\n");
	else {
		kgsl_gmu_regread(device, A6XX_GMU_RPMH_POWER_STATE, &state);
		if (state != GMU_POWER_STATE_SLUMBER) {
		if (state != GPU_HW_SLUMBER) {
			dev_err(&gmu->pdev->dev,
					"Failed to prepare for slumber\n");
			ret = -EINVAL;
@@ -1258,29 +1261,35 @@ static int a6xx_rpmh_gpu_pwrctrl(struct adreno_device *adreno_dev,
	return ret;
}

static bool a6xx_gmu_isidle(struct adreno_device *adreno_dev)
static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;
	unsigned int value;
	unsigned int reg;

	/* Check if GMU on */
	if (!(gmu->flags & GMU_CLK_ON))
		return true;
	kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
		A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg);
	return ((~reg & GPUBUSYIGNAHB) != 0);
}

	/* Ensure GPU is in its lowest power state */
	kgsl_gmu_regread(device, A6XX_GMU_RPMH_POWER_STATE, &value);
static int a6xx_wait_for_gmu_idle(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;

	if (value < gmu->idle_level)
		return false;
	if (timed_poll_check(device, A6XX_GMU_RPMH_POWER_STATE,
		gmu->idle_level, GMU_START_TIMEOUT, 0xf)) {
		dev_err(&gmu->pdev->dev,
			"GMU is not going to powerstate %d\n",
			gmu->idle_level);
		return -ETIMEDOUT;
	}

	/* Ensure GPU and GMU are both idle */
	kgsl_gmu_regread(device->reg_virt, A6XX_GMU_GPU_CX_BUSY_STATUS,
			&value);
	if ((value & SLUMBER_CHECK_MASK) != SLUMBER_CHECK_MASK)
		return false;
	if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
		0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
		dev_err(&gmu->pdev->dev, "GMU is not idling\n");
		return -ETIMEDOUT;
	}

	return true;
	return 0;
}

/*
@@ -2040,5 +2049,6 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
	.oob_set = a6xx_oob_set,
	.oob_clear = a6xx_oob_clear,
	.rpmh_gpu_pwrctrl = a6xx_rpmh_gpu_pwrctrl,
	.gmu_isidle = a6xx_gmu_isidle,
	.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle
};
+6 −2
Original line number Diff line number Diff line
@@ -1086,7 +1086,7 @@ int gmu_probe(struct kgsl_device *device)

	hfi_init(&gmu->hfi, mem_addr, HFI_QUEUE_SIZE);

	gmu->idle_level = GPU_HW_CGC;
	gmu->idle_level = GPU_HW_ACTIVE;

	return 0;

@@ -1312,7 +1312,11 @@ void gmu_stop(struct kgsl_device *device)
	if (!test_bit(GMU_CLK_ON, &gmu->flags))
		return;

	/* TODO: Check for conditions to enter slumber */
	if (gpudev->wait_for_gmu_idle &&
		!gpudev->wait_for_gmu_idle(adreno_dev)) {
		dev_err(&gmu->pdev->dev, "Failure to stop gmu");
		return;
	}

	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);

Loading