Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0cb29de8 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: Implement GMU suspend" into msm-4.9

parents f6af00d8 e923b7a8
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -803,6 +803,8 @@
#define A6XX_GMU_RPMH_HYST_CTRL			0x1F8E9
#define A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE    0x1F8EC
#define A6XX_GMU_BOOT_KMD_LM_HANDSHAKE		0x1F9F0
#define A6XX_GMU_LLM_GLM_SLEEP_CTRL		0x1F957
#define A6XX_GMU_LLM_GLM_SLEEP_STATUS		0x1F958

/* HFI registers */
#define A6XX_GMU_ALWAYS_ON_COUNTER_L		0x1F888
@@ -832,6 +834,10 @@
#define A6XX_GMU_HOST2GMU_INTR_INFO_3		0x1F99E
#define A6XX_GMU_GENERAL_7			0x1F9CC

/* ISENSE registers */
#define A6XX_GMU_ISENSE_CTRL			0x1F95D
#define A6XX_GPU_CS_ENABLE_REG			0x23120

#define A6XX_GMU_AO_INTERRUPT_EN		0x23B03
#define A6XX_GMU_AO_HOST_INTERRUPT_CLR		0x23B04
#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS	0x23B05
@@ -866,6 +872,10 @@
#define A6XX_RSCC_OVERRIDE_START_ADDR			0x23500
#define A6XX_RSCC_SEQ_BUSY_DRV0				0x23501
#define A6XX_RSCC_SEQ_MEM_0_DRV0			0x23580
#define A6XX_RSCC_TCS0_DRV0_STATUS			0x23746
#define A6XX_RSCC_TCS1_DRV0_STATUS                      0x238AE
#define A6XX_RSCC_TCS2_DRV0_STATUS                      0x23A16
#define A6XX_RSCC_TCS3_DRV0_STATUS                      0x23B7E

/* GPU PDC sequencer registers in AOSS.RPMh domain */
#define	PDC_GPU_ENABLE_PDC			0x21140
+0 −32
Original line number Diff line number Diff line
@@ -1642,38 +1642,6 @@ static int adreno_start(struct kgsl_device *device, int priority)
	return ret;
}

/**
 * adreno_vbif_clear_pending_transactions() - Clear transactions in VBIF pipe
 * @device: Pointer to the device whose VBIF pipe is to be cleared
 *
 * Request a halt of all VBIF XIN ports, poll (for up to 100 ms) until every
 * requested port acknowledges the halt, then release the halt request so
 * bus traffic can resume.
 *
 * Return: 0 on success, or -ETIMEDOUT if the acks did not all arrive in time.
 */
static int adreno_vbif_clear_pending_transactions(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	/* Per-target set of XIN halt-request bits to assert and poll for */
	unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
	unsigned int val;
	unsigned long wait_for_vbif;
	int ret = 0;

	/* Assert the halt request for every port in the mask */
	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, mask);
	/* wait for the transactions to clear */
	wait_for_vbif = jiffies + msecs_to_jiffies(100);
	while (1) {
		adreno_readreg(adreno_dev,
			ADRENO_REG_VBIF_XIN_HALT_CTRL1, &val);
		/* All requested ports have acked the halt */
		if ((val & mask) == mask)
			break;
		if (time_after(jiffies, wait_for_vbif)) {
			KGSL_DRV_ERR(device,
				"Wait limit reached for VBIF XIN Halt\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	/* Drop the halt request again, even on timeout */
	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
	return ret;
}

static void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
{
	int i;
+35 −0
Original line number Diff line number Diff line
@@ -499,6 +499,7 @@ struct adreno_device {
 * attached and enabled
 * @ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED - Set if a CACHE_FLUSH_TS irq storm
 * is in progress
 * @ADRENO_DEVICE_HARD_RESET - Set if soft reset fails and hard reset is needed
 */
enum adreno_device_flags {
	ADRENO_DEVICE_PWRON = 0,
@@ -515,6 +516,7 @@ enum adreno_device_flags {
	ADRENO_DEVICE_GPMU_INITIALIZED = 11,
	ADRENO_DEVICE_ISDB_ENABLED = 12,
	ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED = 13,
	ADRENO_DEVICE_HARD_RESET = 14,
};

/**
@@ -1708,4 +1710,37 @@ static inline void adreno_perfcntr_active_oob_put(
	kgsl_active_count_put(KGSL_DEVICE(adreno_dev));
}

/**
 * adreno_vbif_clear_pending_transactions() - Clear transactions in VBIF pipe
 * @device: Pointer to the device whose VBIF pipe is to be cleared
 *
 * Halt every VBIF XIN port, wait up to 100 ms for the halt to be
 * acknowledged, then release the halt request again.
 */
static inline int adreno_vbif_clear_pending_transactions(
	struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int halt_mask = gpudev->vbif_xin_halt_ctrl0_mask;
	unsigned long deadline = jiffies + msecs_to_jiffies(100);
	unsigned int status;
	int rc = 0;

	/* Request a halt of all XIN ports in the mask */
	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, halt_mask);

	/* Poll until every requested port acknowledges the halt */
	for (;;) {
		adreno_readreg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL1,
			&status);
		if ((status & halt_mask) == halt_mask)
			break;
		if (time_after(jiffies, deadline)) {
			KGSL_DRV_ERR(device,
				"Wait limit reached for VBIF XIN Halt\n");
			rc = -ETIMEDOUT;
			break;
		}
	}

	/* Release the halt so traffic can resume, even after a timeout */
	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);

	return rc;
}

#endif /*__ADRENO_H */
+198 −72
Original line number Diff line number Diff line
@@ -29,8 +29,6 @@
#include "kgsl_gmu.h"
#include "kgsl_trace.h"

#define OOB_REQUEST_TIMEOUT	10 /* ms */

#define A6XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \
		(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F))

@@ -791,6 +789,8 @@ static void _load_gmu_rpmh_ucode(struct kgsl_device *device)

#define GMU_START_TIMEOUT	10	/* ms */
#define GPU_START_TIMEOUT	100	/* ms */
#define GPU_RESET_TIMEOUT	1	/* ms */
#define GPU_RESET_TIMEOUT_US	10	/* us */

/*
 * timed_poll_check() - polling *gmu* register at given offset until
@@ -962,7 +962,9 @@ static int a6xx_oob_set(struct adreno_device *adreno_dev,
			GPU_START_TIMEOUT,
			check_mask)) {
		ret = -ETIMEDOUT;
		dev_err(&gmu->pdev->dev, "OOB set timed out\n");
		dev_err(&gmu->pdev->dev,
			"OOB set timed out, mask %x\n", set_mask);
		WARN_ON(true);
	}

	kgsl_gmu_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, clear_mask);
@@ -1119,11 +1121,14 @@ static int a6xx_hm_sptprac_enable(struct kgsl_device *device)

	/* If GMU does not control HM we must */
	if (gmu->idle_level < GPU_HW_IFPC) {

		ret = a6xx_hm_enable(ADRENO_DEVICE(device));
		if (ret) {
			dev_err(&gmu->pdev->dev, "Failed to power on GPU HM\n");
			return ret;
		}


	}

	/* If GMU does not control SPTPRAC we must */
@@ -1161,19 +1166,6 @@ static int a6xx_hm_sptprac_disable(struct kgsl_device *device)
	return ret;
}

/*
 * a6xx_hm_sptprac_control() - Turn HM and SPTPRAC on or off
 * @device: Pointer to KGSL device
 * @on: True to turn on or false to turn off
 *
 * Thin dispatcher that routes the request to the enable or disable helper.
 */
static int a6xx_hm_sptprac_control(struct kgsl_device *device, bool on)
{
	return on ? a6xx_hm_sptprac_enable(device) :
			a6xx_hm_sptprac_disable(device);
}

/*
 * a6xx_gfx_rail_on() - request GMU to power GPU at given OPP.
 * @device: Pointer to KGSL device
@@ -1206,6 +1198,8 @@ static int a6xx_gfx_rail_on(struct kgsl_device *device)
	return ret;
}

#define GMU_POWER_STATE_SLUMBER 15

/*
 * a6xx_notify_slumber() - initiate request to GMU to prepare to slumber
 * @device: Pointer to KGSL device
@@ -1281,10 +1275,9 @@ static int a6xx_rpmh_power_on_gpu(struct kgsl_device *device)
	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);

	/* Turn on the HM and SPTP head switches */
	ret = a6xx_hm_sptprac_control(device, true);
	ret = a6xx_hm_sptprac_enable(device);

	return ret;

error_rsc:
	dev_err(dev, "GPU RSC sequence stuck in waking up GPU\n");
		return -EINVAL;
@@ -1296,7 +1289,7 @@ static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
	int val, ret = 0;

	/* Turn off the SPTP and HM head switches */
	ret = a6xx_hm_sptprac_control(device, false);
	ret = a6xx_hm_sptprac_disable(device);

	/* RSC sleep sequence */
	kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
@@ -1339,7 +1332,12 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
	int ret, i;

	if (boot_state == GMU_COLD_BOOT || boot_state == GMU_RESET) {
	switch (boot_state) {
	case GMU_COLD_BOOT:
		/* Turn on the HM and SPTP head switches */
		ret = a6xx_hm_sptprac_enable(device);
		if (ret)
			return ret;

		/* Turn on TCM retention */
		kgsl_gmu_regwrite(device, A6XX_GMU_GENERAL_7, 1);
@@ -1347,7 +1345,7 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
		if (!test_and_set_bit(GMU_BOOT_INIT_DONE, &gmu->flags)) {
			_load_gmu_rpmh_ucode(device);
			/* Turn on the HM and SPTP head switches */
			ret = a6xx_hm_sptprac_control(device, true);
			ret = a6xx_hm_sptprac_enable(device);
			if (ret)
				return ret;
		} else {
@@ -1371,10 +1369,19 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
					gmu->load_mode);
			return -EINVAL;
		}
	} else {
		break;
	case GMU_WARM_BOOT:
		ret = a6xx_rpmh_power_on_gpu(device);
		if (ret)
			return ret;
		break;
	case GMU_RESET:
		/* Turn on the HM and SPTP head switches */
		ret = a6xx_hm_sptprac_enable(device);
		if (ret)
			return ret;
	default:
		break;
	}

	/* Clear init result to make sure we are getting fresh value */
@@ -1394,8 +1401,7 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
	if (ret)
		return ret;

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)
			&& boot_state == GMU_COLD_BOOT) {
	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
		ret = a6xx_gfx_rail_on(device);
		if (ret) {
			a6xx_oob_clear(adreno_dev,
@@ -1425,7 +1431,7 @@ static int a6xx_gmu_dcvs_nohfi(struct kgsl_device *device,
		unsigned int perf_idx, unsigned int bw_idx)
{
	struct hfi_dcvs_cmd dcvs_cmd = {
		.ack_type = ACK_BLOCK,
		.ack_type = ACK_NONBLOCK,
		.freq = {
			.perf_idx = perf_idx,
			.clkset_opt = OPTION_AT_LEAST,
@@ -1439,10 +1445,6 @@ static int a6xx_gmu_dcvs_nohfi(struct kgsl_device *device,
	union gpu_perf_vote vote;
	int ret;

	if (device->state == KGSL_STATE_INIT ||
			device->state == KGSL_STATE_SUSPEND)
		dcvs_cmd.ack_type = ACK_NONBLOCK;

	kgsl_gmu_regwrite(device, A6XX_GMU_DCVS_ACK_OPTION, dcvs_cmd.ack_type);

	vote.fvote = dcvs_cmd.freq;
@@ -1469,43 +1471,6 @@ static int a6xx_gmu_dcvs_nohfi(struct kgsl_device *device,
	return ret;
}

/*
 * a6xx_rpmh_gpu_pwrctrl() - GPU power control via RPMh/GMU interface
 * @adreno_dev: Pointer to adreno device
 * @mode: requested power mode (GMU_FW_START, GMU_FW_STOP,
 *        GMU_DCVS_NOHFI or GMU_NOTIFY_SLUMBER)
 * @arg1: first argument for mode control
 * @arg2: second argument for mode control
 *
 * Return: result of the selected helper, or -EINVAL for an unknown mode.
 */
static int a6xx_rpmh_gpu_pwrctrl(struct adreno_device *adreno_dev,
		unsigned int mode, unsigned int arg1, unsigned int arg2)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;
	int ret;

	switch (mode) {
	case GMU_FW_START:
		/* arg1 selects the GMU boot state (cold/warm/reset) */
		ret = a6xx_gmu_fw_start(device, arg1);
		break;
	case GMU_FW_STOP:
		ret = a6xx_rpmh_power_off_gpu(device);
		break;
	case GMU_DCVS_NOHFI:
		/* arg1 = perf index, arg2 = bandwidth index */
		ret = a6xx_gmu_dcvs_nohfi(device, arg1, arg2);
		break;
	case GMU_NOTIFY_SLUMBER:
		ret = a6xx_notify_slumber(device);
		break;
	default:
		dev_err(&gmu->pdev->dev,
				"unsupported GMU power ctrl mode:%d\n", mode);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
{
	unsigned int reg;
@@ -1622,6 +1587,169 @@ static int a6xx_soft_reset(struct adreno_device *adreno_dev)
	return 0;
}

#define A6XX_STATE_OF_CHILD             (BIT(4) | BIT(5))
#define A6XX_IDLE_FULL_LLM              BIT(0)
#define A6XX_WAKEUP_ACK                 BIT(1)
#define A6XX_IDLE_FULL_ACK              BIT(0)
#define A6XX_VBIF_XIN_HALT_CTRL1_ACKS   (BIT(0) | BIT(1) | BIT(2) | BIT(3))

/*
 * a6xx_isense_disable() - Turn off the current-sense block if it is active
 * @device: Pointer to the KGSL device
 *
 * Does nothing unless the Limits Management (LM) feature is enabled and
 * the current-sense enable register reads back non-zero.
 */
static void a6xx_isense_disable(struct kgsl_device *device)
{
	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int cs_enable;

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
		return;

	kgsl_gmu_regread(device, A6XX_GPU_CS_ENABLE_REG, &cs_enable);
	if (!cs_enable)
		return;

	kgsl_gmu_regwrite(device, A6XX_GPU_CS_ENABLE_REG, 0);
	kgsl_gmu_regwrite(device, A6XX_GMU_ISENSE_CTRL, 0);
}

/*
 * a6xx_llm_glm_handshake() - Perform the LLM-GLM sleep handshake
 * @device: Pointer to the KGSL device
 *
 * Only applies when the Limits Management (LM) feature is enabled. If the
 * sleep-control register does not already report the child-state bits set,
 * request full-LLM idle and poll for the acknowledgment.
 *
 * Return: 0 on success (or when LM is disabled), -EINVAL if the idle ack
 * does not arrive within GPU_RESET_TIMEOUT.
 */
static int a6xx_llm_glm_handshake(struct kgsl_device *device)
{
	unsigned int val;
	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;

	/* Nothing to hand off if limits management is not in use */
	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
		return 0;

	kgsl_gmu_regread(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, &val);
	if (!(val & A6XX_STATE_OF_CHILD)) {
		/* Set BIT(4) and then the IDLE_FULL_LLM request bit, and
		 * wait for the IDLE_FULL ack in the sleep status register.
		 */
		kgsl_gmu_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0, BIT(4));
		kgsl_gmu_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0,
				A6XX_IDLE_FULL_LLM);
		if (timed_poll_check(device, A6XX_GMU_LLM_GLM_SLEEP_STATUS,
				A6XX_IDLE_FULL_ACK, GPU_RESET_TIMEOUT,
				A6XX_IDLE_FULL_ACK)) {
			dev_err(&gmu->pdev->dev, "LLM-GLM handshake failed\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int a6xx_complete_rpmh_votes(struct kgsl_device *device)
{
	int ret = 0;

	if (!kgsl_gmu_isenabled(device))
		return ret;

	ret |= timed_poll_check(device, A6XX_RSCC_TCS0_DRV0_STATUS, BIT(0),
			GPU_RESET_TIMEOUT, BIT(0));
	ret |= timed_poll_check(device, A6XX_RSCC_TCS1_DRV0_STATUS, BIT(0),
			GPU_RESET_TIMEOUT, BIT(0));
	ret |= timed_poll_check(device, A6XX_RSCC_TCS2_DRV0_STATUS, BIT(0),
			GPU_RESET_TIMEOUT, BIT(0));
	ret |= timed_poll_check(device, A6XX_RSCC_TCS3_DRV0_STATUS, BIT(0),
			GPU_RESET_TIMEOUT, BIT(0));

	return ret;
}

/*
 * a6xx_gmu_suspend() - Quiesce and power off the GPU from the CPU side
 * @device: Pointer to the KGSL device
 *
 * Walks the hardware the GMU normally manages — ISENSE, the LLM/GLM
 * handshake, the SPTPRAC head switch, pending VBIF traffic and outstanding
 * RPMh votes — then turns off the GX rail. Failures in the early steps are
 * best-effort and do not abort the sequence.
 *
 * Return: 0 on success, or the error from the GX power-off step.
 */
static int a6xx_gmu_suspend(struct kgsl_device *device)
{
	/* Max GX clients on A6xx is 2: GMU and KMD */
	int ret = 0, max_client_num = 2;
	struct gmu_device *gmu = &device->gmu;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/* Disable ISENSE if it's on (no-op unless the LM feature is set) */
	a6xx_isense_disable(device);

	/* LLM-GLM handshake sequence (also gated on the LM feature) */
	a6xx_llm_glm_handshake(device);

	/* If SPTP_RAC is on, turn off SPTP_RAC HS */
	a6xx_sptprac_disable(adreno_dev);

	/* Disconnect GPU from BUS. Clear and reconnected after reset */
	adreno_vbif_clear_pending_transactions(device);
	/* A soft reset of the GPU core was deemed unnecessary at this point */

	/* Check no outstanding RPMh voting */
	a6xx_complete_rpmh_votes(device);

	if (gmu->idle_level < GPU_HW_IFPC) {
		/* HM GDSC is controlled by KGSL */
		ret = a6xx_hm_disable(ADRENO_DEVICE(device));
		if (ret)
			dev_err(&gmu->pdev->dev,
				"suspend: fail: power off GPU HM\n");
	} else if (gmu->gx_gdsc) {
		if (regulator_is_enabled(gmu->gx_gdsc)) {
			/* Switch gx gdsc control from GMU to CPU
			 * force non-zero reference count in clk driver
			 * so next disable call will turn
			 * off the GDSC
			 */
			ret = regulator_enable(gmu->gx_gdsc);
			if (ret)
				dev_err(&gmu->pdev->dev,
					"suspend fail: gx enable\n");

			/* Drop every outstanding reference (at most
			 * max_client_num) until the GDSC reads disabled.
			 */
			while ((max_client_num)) {
				ret = regulator_disable(gmu->gx_gdsc);
				if (!regulator_is_enabled(gmu->gx_gdsc))
					break;
				max_client_num -= 1;
			}

			if (!max_client_num)
				dev_err(&gmu->pdev->dev,
					"suspend fail: cannot disable gx\n");
		}
	}

	return ret;
}

/*
 * a6xx_rpmh_gpu_pwrctrl() - GPU power control via RPMh/GMU interface
 * @adreno_dev: Pointer to adreno device
 * @mode: requested power mode
 * @arg1: first argument for mode control
 * @arg2: second argument for mode control
 */
/*
 * a6xx_rpmh_gpu_pwrctrl() - GPU power control via RPMh/GMU interface
 * @adreno_dev: Pointer to adreno device
 * @mode: requested power mode
 * @arg1: first argument for mode control
 * @arg2: second argument for mode control
 *
 * Dispatch the request to the helper implementing the selected mode;
 * unknown modes are logged and rejected with -EINVAL.
 */
static int a6xx_rpmh_gpu_pwrctrl(struct adreno_device *adreno_dev,
		unsigned int mode, unsigned int arg1, unsigned int arg2)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;
	int status;

	switch (mode) {
	case GMU_FW_START:
		/* arg1 carries the GMU boot state */
		status = a6xx_gmu_fw_start(device, arg1);
		break;
	case GMU_SUSPEND:
		status = a6xx_gmu_suspend(device);
		break;
	case GMU_FW_STOP:
		status = a6xx_rpmh_power_off_gpu(device);
		break;
	case GMU_DCVS_NOHFI:
		/* arg1 = perf index, arg2 = bandwidth index */
		status = a6xx_gmu_dcvs_nohfi(device, arg1, arg2);
		break;
	case GMU_NOTIFY_SLUMBER:
		status = a6xx_notify_slumber(device);
		break;
	default:
		dev_err(&gmu->pdev->dev,
				"unsupported GMU power ctrl mode:%d\n", mode);
		status = -EINVAL;
		break;
	}

	return status;
}

static void a6xx_cp_hw_err_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -1708,7 +1836,6 @@ static inline void _reg_rmw(void __iomem *regaddr,
	wmb();
}


/*
 * a6xx_llc_configure_gpu_scid() - Program the sub-cache ID for all GPU blocks
 * @adreno_dev: The adreno device pointer
@@ -2330,7 +2457,6 @@ static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
				A6XX_GMU_HOST2GMU_INTR_CLR),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
				A6XX_GMU_HOST2GMU_INTR_RAW_INFO),

	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
				A6XX_RBBM_SECVID_TRUST_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
+3 −0
Original line number Diff line number Diff line
@@ -40,6 +40,8 @@
 * that the KGSL module believes a device is idle (has been inactive	*
 * past its timer) and all system resources are released.  SUSPEND is	*
 * requested by the kernel and will be enforced upon all open devices.	*
 * RESET indicates that a GPU or GMU hang has occurred and that KGSL	*
 * is capturing a snapshot or recovering the GPU from the hang.		*
 */

#define KGSL_STATE_NONE		0x00000000
@@ -49,6 +51,7 @@
#define KGSL_STATE_SUSPEND	0x00000010
#define KGSL_STATE_AWARE	0x00000020
#define KGSL_STATE_SLUMBER	0x00000080
#define KGSL_STATE_RESET	0x00000100

/**
 * enum kgsl_event_results - result codes passed to an event callback when the
Loading