Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e494a69a authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: Ensure CX GDSC is off in SLUMBER for RGMU"

parents cb741ea9 f78af3cc
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -473,21 +473,21 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
		.max_power = 5448,
	},
	{
		.gpurev = ADRENO_REV_A608,
		.gpurev = ADRENO_REV_A612,
		.core = 6,
		.major = 0,
		.minor = 8,
		.major = 1,
		.minor = 2,
		.patchid = ANY_ID,
		.features = ADRENO_64BIT | ADRENO_CONTENT_PROTECTION |
			ADRENO_IOCOHERENT | ADRENO_PREEMPTION | ADRENO_GPMU |
			ADRENO_IFPC | ADRENO_PERFCTRL_RETAIN,
		.sqefw_name = "a630_sqe.fw",
		.zap_name = "a608_zap",
		.zap_name = "a612_zap",
		.gpudev = &adreno_a6xx_gpudev,
		.gmem_size = (SZ_128K + SZ_4K),
		.num_protected_regs = 0x20,
		.busy_mask = 0xFFFFFFFE,
		.gpmufw_name = "a608_rgmu.bin",
		.gpmufw_name = "a612_rgmu.bin",
	},
	{
		.gpurev = ADRENO_REV_A616,
+2 −2
Original line number Diff line number Diff line
@@ -211,7 +211,7 @@ enum adreno_gpurev {
	ADRENO_REV_A512 = 512,
	ADRENO_REV_A530 = 530,
	ADRENO_REV_A540 = 540,
	ADRENO_REV_A608 = 608,
	ADRENO_REV_A612 = 612,
	ADRENO_REV_A615 = 615,
	ADRENO_REV_A616 = 616,
	ADRENO_REV_A618 = 618,
@@ -1293,7 +1293,7 @@ static inline int adreno_is_a6xx(struct adreno_device *adreno_dev)
			ADRENO_GPUREV(adreno_dev) < 700;
}

ADRENO_TARGET(a608, ADRENO_REV_A608)
ADRENO_TARGET(a612, ADRENO_REV_A612)
ADRENO_TARGET(a618, ADRENO_REV_A618)
ADRENO_TARGET(a630, ADRENO_REV_A630)
ADRENO_TARGET(a640, ADRENO_REV_A640)
+26 −26
Original line number Diff line number Diff line
@@ -59,7 +59,7 @@ static const struct adreno_vbif_platform a6xx_vbif_platforms[] = {
	{ adreno_is_a640, a640_gbif },
	{ adreno_is_a650, a650_gbif },
	{ adreno_is_a680, a640_gbif },
	{ adreno_is_a608, a615_gbif },
	{ adreno_is_a612, a615_gbif },
};

struct kgsl_hwcg_reg {
@@ -345,7 +345,7 @@ static const struct kgsl_hwcg_reg a650_hwcg_regs[] = {
};


static const struct kgsl_hwcg_reg a608_hwcg_regs[] = {
static const struct kgsl_hwcg_reg a612_hwcg_regs[] = {
	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x0000F3CF},
@@ -407,7 +407,7 @@ static const struct {
	{adreno_is_a640, a640_hwcg_regs, ARRAY_SIZE(a640_hwcg_regs)},
	{adreno_is_a650, a650_hwcg_regs, ARRAY_SIZE(a650_hwcg_regs)},
	{adreno_is_a680, a640_hwcg_regs, ARRAY_SIZE(a640_hwcg_regs)},
	{adreno_is_a608, a608_hwcg_regs, ARRAY_SIZE(a608_hwcg_regs)},
	{adreno_is_a612, a612_hwcg_regs, ARRAY_SIZE(a612_hwcg_regs)},
};

static struct a6xx_protected_regs {
@@ -647,7 +647,7 @@ __get_rbbm_clock_cntl_on(struct adreno_device *adreno_dev)
{
	if (adreno_is_a630(adreno_dev))
		return 0x8AA8AA02;
	else if (adreno_is_a608(adreno_dev))
	else if (adreno_is_a612(adreno_dev))
		return 0xAAA8AA82;
	else
		return 0x8AA8AA82;
@@ -656,7 +656,7 @@ __get_rbbm_clock_cntl_on(struct adreno_device *adreno_dev)
static inline unsigned int
__get_gmu_ao_cgc_mode_cntl(struct adreno_device *adreno_dev)
{
	if (adreno_is_a608(adreno_dev))
	if (adreno_is_a612(adreno_dev))
		return 0x00000022;
	else if (adreno_is_a615_family(adreno_dev))
		return 0x00000222;
@@ -667,7 +667,7 @@ __get_gmu_ao_cgc_mode_cntl(struct adreno_device *adreno_dev)
static inline unsigned int
__get_gmu_ao_cgc_delay_cntl(struct adreno_device *adreno_dev)
{
	if (adreno_is_a608(adreno_dev))
	if (adreno_is_a612(adreno_dev))
		return 0x00000011;
	else if (adreno_is_a615_family(adreno_dev))
		return 0x00000111;
@@ -678,7 +678,7 @@ __get_gmu_ao_cgc_delay_cntl(struct adreno_device *adreno_dev)
static inline unsigned int
__get_gmu_ao_cgc_hyst_cntl(struct adreno_device *adreno_dev)
{
	if (adreno_is_a608(adreno_dev))
	if (adreno_is_a612(adreno_dev))
		return 0x00000055;
	else if (adreno_is_a615_family(adreno_dev))
		return 0x00000555;
@@ -725,11 +725,11 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)

	/*
	 * Disable SP clock before programming HWCG registers.
	 * A608 GPU is not having the GX power domain. Hence
	 * skip GMU_GX registers for A608.
	 * A612 GPU is not having the GX power domain. Hence
	 * skip GMU_GX registers for A612.
	 */

	if (!adreno_is_a608(adreno_dev))
	if (gmu_core_isenabled(device) && !adreno_is_a612(adreno_dev))
		gmu_core_regrmw(device,
			A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);

@@ -738,10 +738,10 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)

	/*
	 * Enable SP clock after programming HWCG registers.
	 * A608 GPU is not having the GX power domain. Hence
	 * skip GMU_GX registers for A608.
	 * A612 GPU is not having the GX power domain. Hence
	 * skip GMU_GX registers for A612.
	 */
	if (!adreno_is_a608(adreno_dev))
	if (gmu_core_isenabled(device) && !adreno_is_a612(adreno_dev))
		gmu_core_regrmw(device,
			A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);

@@ -885,7 +885,7 @@ static void a6xx_start(struct adreno_device *adreno_dev)
	if (ADRENO_GPUREV(adreno_dev) >= ADRENO_REV_A640) {
		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);
	} else if (adreno_is_a608(adreno_dev)) {
	} else if (adreno_is_a612(adreno_dev)) {
		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
	} else {
@@ -893,8 +893,8 @@ static void a6xx_start(struct adreno_device *adreno_dev)
		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);
	}

	if (adreno_is_a608(adreno_dev)) {
		/* For a608 Mem pool size is reduced to 48 */
	if (adreno_is_a612(adreno_dev)) {
		/* For A612 Mem pool size is reduced to 48 */
		kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 48);
		kgsl_regwrite(device, A6XX_CP_MEM_POOL_DBG_ADDR, 47);
	} else {
@@ -906,7 +906,7 @@ static void a6xx_start(struct adreno_device *adreno_dev)
		kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x400 << 11));
	else if (adreno_is_a680(adreno_dev))
		kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x800 << 11));
	else if (adreno_is_a608(adreno_dev))
	else if (adreno_is_a612(adreno_dev))
		kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x100 << 11));
	else
		kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
@@ -918,7 +918,7 @@ static void a6xx_start(struct adreno_device *adreno_dev)
	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_CNTL, 0x1);

	/* Turn on GX_MEM retention */
	if (gmu_core_isenabled(device) && adreno_is_a608(adreno_dev)) {
	if (gmu_core_isenabled(device) && adreno_is_a612(adreno_dev)) {
		kgsl_regwrite(device, A6XX_RBBM_BLOCK_GX_RETENTION_CNTL, 0x7FB);
		/* For CP IPC interrupt */
		kgsl_regwrite(device, A6XX_RBBM_INT_2_MASK, 0x00000010);
@@ -1291,7 +1291,7 @@ static int a6xx_rb_start(struct adreno_device *adreno_dev,
 */
static int a6xx_sptprac_enable(struct adreno_device *adreno_dev)
{
	if (adreno_is_a608(adreno_dev))
	if (adreno_is_a612(adreno_dev))
		return 0;

	return a6xx_gmu_sptprac_enable(adreno_dev);
@@ -1303,7 +1303,7 @@ static int a6xx_sptprac_enable(struct adreno_device *adreno_dev)
 */
static void a6xx_sptprac_disable(struct adreno_device *adreno_dev)
{
	if (adreno_is_a608(adreno_dev))
	if (adreno_is_a612(adreno_dev))
		return;

	a6xx_gmu_sptprac_disable(adreno_dev);
@@ -1669,7 +1669,7 @@ static void a6xx_llc_configure_gpu_scid(struct adreno_device *adreno_dev)
		gpu_cntl1_val = (gpu_cntl1_val << A6XX_GPU_LLC_SCID_NUM_BITS)
			| gpu_scid;

	if (adreno_is_a640(adreno_dev) || adreno_is_a608(adreno_dev)) {
	if (adreno_is_a640(adreno_dev) || adreno_is_a612(adreno_dev)) {
		kgsl_regrmw(KGSL_DEVICE(adreno_dev), A6XX_GBIF_SCACHE_CNTL1,
			A6XX_GPU_LLC_SCID_MASK, gpu_cntl1_val);
	} else {
@@ -1691,7 +1691,7 @@ static void a6xx_llc_configure_gpuhtw_scid(struct adreno_device *adreno_dev)
	 * On A640, the GPUHTW SCID is configured via a NoC override in the
	 * XBL image.
	 */
	if (adreno_is_a640(adreno_dev) || adreno_is_a608(adreno_dev))
	if (adreno_is_a640(adreno_dev) || adreno_is_a612(adreno_dev))
		return;

	gpuhtw_scid = adreno_llc_get_scid(adreno_dev->gpuhtw_llc_slice);
@@ -1712,7 +1712,7 @@ static void a6xx_llc_enable_overrides(struct adreno_device *adreno_dev)
	 * Attributes override through GBIF is not supported with MMU-500.
	 * Attributes are used as configured through SMMU pagetable entries.
	 */
	if (adreno_is_a640(adreno_dev) || adreno_is_a608(adreno_dev))
	if (adreno_is_a640(adreno_dev) || adreno_is_a612(adreno_dev))
		return;

	/*
@@ -2732,7 +2732,7 @@ static const struct {
	void (*func)(struct adreno_device *adreno_dev);
} a6xx_efuse_funcs[] = {
	{ adreno_is_a615_family, a6xx_efuse_speed_bin },
	{ adreno_is_a608, a6xx_efuse_speed_bin },
	{ adreno_is_a612, a6xx_efuse_speed_bin },
};

static void a6xx_check_features(struct adreno_device *adreno_dev)
@@ -2941,10 +2941,10 @@ static const struct adreno_reg_offsets a6xx_reg_offsets = {
static void a6xx_perfcounter_init(struct adreno_device *adreno_dev)
{
	/*
	 * A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4/5 is not present on A608.
	 * A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4/5 is not present on A612.
	 * Mark them as broken so that they can't be used.
	 */
	if (adreno_is_a608(adreno_dev)) {
	if (adreno_is_a612(adreno_dev)) {
		a6xx_pwrcounters_gpmu[4].countable = KGSL_PERFCOUNTER_BROKEN;
		a6xx_pwrcounters_gpmu[5].countable = KGSL_PERFCOUNTER_BROKEN;
	}
+1 −1
Original line number Diff line number Diff line
@@ -1177,7 +1177,7 @@ void _enable_gpuhtw_llc(struct kgsl_mmu *mmu, struct kgsl_iommu_pt *iommu_pt)
		return;

	/* Domain attribute to enable system cache for GPU pagetable walks */
	if (adreno_is_a640(adreno_dev) || adreno_is_a608(adreno_dev))
	if (adreno_is_a640(adreno_dev) || adreno_is_a612(adreno_dev))
		ret = iommu_domain_set_attr(iommu_pt->domain,
			DOMAIN_ATTR_USE_LLC_NWA, &gpuhtw_llc_enable);
	else
+25 −2
Original line number Diff line number Diff line
@@ -191,22 +191,45 @@ static int rgmu_enable_clks(struct kgsl_device *device)
	return 0;
}

/* Maximum time to poll for the CX GDSC to actually collapse, in ms */
#define CX_GDSC_TIMEOUT	5000	/* ms */

/**
 * rgmu_disable_gdsc() - Vote off the CX GDSC and ensure it turns off
 * @device: Pointer to the KGSL device
 *
 * Remove this driver's vote on the CX GDSC. Voting off alone from the
 * GPU driver cannot guarantee that the GDSC is actually off, so poll
 * the regulator state for up to CX_GDSC_TIMEOUT before giving up.
 *
 * Return: 0 on success (or if there is no CX GDSC to manage), the
 * error code from regulator_disable() on a failed disable, or
 * -ETIMEDOUT if the GDSC is still enabled after the timeout.
 */
static int rgmu_disable_gdsc(struct kgsl_device *device)
{
	struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device);
	int ret = 0;
	unsigned long t;

	/* Nothing to do if the CX GDSC was never acquired */
	if (IS_ERR_OR_NULL(rgmu->cx_gdsc))
		return 0;

	ret = regulator_disable(rgmu->cx_gdsc);
	if (ret) {
		dev_err(&rgmu->pdev->dev,
				"Failed to disable CX gdsc:%d\n", ret);

		return ret;
	}

	/*
	 * After GX GDSC is off, CX GDSC must be off.
	 * Voting off alone from GPU driver cannot
	 * guarantee CX GDSC off. Polling with 5sec
	 * timeout to ensure CX GDSC is off.
	 */
	t = jiffies + msecs_to_jiffies(CX_GDSC_TIMEOUT);
	do {
		if (!regulator_is_enabled(rgmu->cx_gdsc))
			return 0;
		usleep_range(10, 100);

	} while (!(time_after(jiffies, t)));

	/* One final check in case the deadline raced with the collapse */
	if (!regulator_is_enabled(rgmu->cx_gdsc))
		return 0;

	dev_err(&rgmu->pdev->dev, "RGMU CX gdsc off timeout\n");

	return -ETIMEDOUT;
}

static int rgmu_enable_gdsc(struct rgmu_device *rgmu)
{
	int ret;