Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9c5a7a49 authored by Deepak Kumar
Browse files

msm: kgsl: Use correct checks for GPMU related functionality



Use the "gmu_core_gpmu_isenabled" check for all GPMU-related
functionality instead of "gmu_core_isenabled", as the latter will
return true even when the ADRENO_GPMU feature flag is not defined.

Change-Id: Ic577052d8c1cc52965e07c557b178fec444c3613
Signed-off-by: Deepak Kumar <dkumar@codeaurora.org>
parent 681f798f
Loading
Loading
Loading
Loading
+4 −3
Original line number Diff line number Diff line
@@ -633,7 +633,7 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
	 * This is usually harmless because the GMU will abort power collapse
	 * and change the fence back to ALLOW. Poll so that this can happen.
	 */
	if (gmu_core_isenabled(device)) {
	if (gmu_core_gpmu_isenabled(device)) {
		adreno_readreg(adreno_dev,
				ADRENO_REG_GMU_AO_AHB_FENCE_CTRL,
				&fence);
@@ -2070,7 +2070,8 @@ static int _adreno_start(struct adreno_device *adreno_dev)
		}
	}

	if (gmu_core_isenabled(device) && adreno_dev->perfctr_ifpc_lo == 0) {
	if (gmu_core_gpmu_isenabled(device) &&
			adreno_dev->perfctr_ifpc_lo == 0) {
		ret = adreno_perfcounter_get(adreno_dev,
				KGSL_PERFCOUNTER_GROUP_GPMU_PWR, 4,
				&adreno_dev->perfctr_ifpc_lo, NULL,
@@ -3375,7 +3376,7 @@ int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,

	adreno_writereg(adreno_dev, offset, val);

	if (!gmu_core_isenabled(KGSL_DEVICE(adreno_dev)))
	if (!gmu_core_gpmu_isenabled(KGSL_DEVICE(adreno_dev)))
		return 0;

	for (i = 0; i < GMU_CORE_LONG_WAKEUP_RETRY_LIMIT; i++) {
+7 −6
Original line number Diff line number Diff line
@@ -787,7 +787,7 @@ static void a6xx_start(struct adreno_device *adreno_dev)
	static bool patch_reglist;

	/* runtime adjust callbacks based on feature sets */
	if (!gmu_core_isenabled(device))
	if (!gmu_core_gpmu_isenabled(device))
		/* Legacy idle management if gmu is disabled */
		ADRENO_GPU_DEVICE(adreno_dev)->hw_isidle = NULL;
	/* enable hardware clockgating */
@@ -865,7 +865,7 @@ static void a6xx_start(struct adreno_device *adreno_dev)
	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_CNTL, 0x1);

	/* Turn on GX_MEM retention */
	if (gmu_core_isenabled(device) && adreno_is_a612(adreno_dev)) {
	if (gmu_core_gpmu_isenabled(device) && adreno_is_a612(adreno_dev)) {
		kgsl_regwrite(device, A6XX_RBBM_BLOCK_GX_RETENTION_CNTL, 0x7FB);
		/* For CP IPC interrupt */
		kgsl_regwrite(device, A6XX_RBBM_INT_2_MASK, 0x00000010);
@@ -1356,7 +1356,7 @@ static int _load_firmware(struct kgsl_device *device, const char *fwfile,
static inline void a6xx_gpu_keepalive(struct adreno_device *adreno_dev,
		bool state)
{
	if (!gmu_core_isenabled(KGSL_DEVICE(adreno_dev)))
	if (!gmu_core_gpmu_isenabled(KGSL_DEVICE(adreno_dev)))
		return;

	adreno_write_gmureg(adreno_dev,
@@ -1394,7 +1394,8 @@ static int a6xx_microcode_read(struct adreno_device *adreno_dev)
			return ret;
	}

	if (GMU_DEV_OP_VALID(gmu_dev_ops, load_firmware))
	if (gmu_core_gpmu_isenabled(device) &&
			GMU_DEV_OP_VALID(gmu_dev_ops, load_firmware))
		return gmu_dev_ops->load_firmware(device);

	return 0;
@@ -1411,7 +1412,7 @@ static int a6xx_soft_reset(struct adreno_device *adreno_dev)
	 * For the soft reset case with GMU enabled this part is done
	 * by the GMU firmware
	 */
	if (gmu_core_isenabled(device) &&
	if (gmu_core_gpmu_isenabled(device) &&
		!test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv))
		return 0;

@@ -1506,7 +1507,7 @@ static int a6xx_reset(struct kgsl_device *device, int fault)
	int i = 0;

	/* Use the regular reset sequence for No GMU */
	if (!gmu_core_isenabled(device))
	if (!gmu_core_gpmu_isenabled(device))
		return adreno_reset(device, fault);

	/* Transition from ACTIVE to RESET state */
+17 −14
Original line number Diff line number Diff line
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -556,7 +556,7 @@ static int a6xx_gmu_oob_set(struct adreno_device *adreno_dev,
	int ret = 0;
	int set, check;

	if (!gmu_core_isenabled(device))
	if (!gmu_core_gpmu_isenabled(device))
		return 0;

	if (!adreno_is_a630(adreno_dev) && !adreno_is_a615_family(adreno_dev)) {
@@ -609,7 +609,7 @@ static inline void a6xx_gmu_oob_clear(struct adreno_device *adreno_dev,
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	int clear;

	if (!gmu_core_isenabled(device))
	if (!gmu_core_gpmu_isenabled(device))
		return;

	if (!adreno_is_a630(adreno_dev) && !adreno_is_a615_family(adreno_dev)) {
@@ -660,6 +660,9 @@ static int a6xx_gmu_hfi_start_msg(struct adreno_device *adreno_dev)
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct hfi_start_cmd req;

	if (!gmu_core_gpmu_isenabled(device))
		return 0;

	if (adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev))
		return hfi_send_req(KGSL_GMU_DEVICE(device),
					 H2F_MSG_START, &req);
@@ -704,7 +707,7 @@ static int a6xx_complete_rpmh_votes(struct kgsl_device *device)
{
	int ret = 0;

	if (!gmu_core_isenabled(device))
	if (!gmu_core_gpmu_isenabled(device))
		return ret;

	ret |= timed_poll_check(device, A6XX_RSCC_TCS0_DRV0_STATUS, BIT(0),
@@ -735,7 +738,7 @@ int a6xx_gmu_sptprac_enable(struct adreno_device *adreno_dev)
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);

	if (!gmu_core_gpmu_isenabled(device) ||
	if (!gmu_core_isenabled(device) ||
			!adreno_has_sptprac_gdsc(adreno_dev))
		return 0;

@@ -763,7 +766,7 @@ void a6xx_gmu_sptprac_disable(struct adreno_device *adreno_dev)
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);

	if (!gmu_core_gpmu_isenabled(device) ||
	if (!gmu_core_isenabled(device) ||
			!adreno_has_sptprac_gdsc(adreno_dev))
		return;

@@ -798,6 +801,9 @@ static bool a6xx_gmu_gx_is_on(struct adreno_device *adreno_dev)
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int val;

	if (!gmu_core_isenabled(device))
		return true;

	gmu_core_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
	return is_on(val);
}
@@ -871,7 +877,7 @@ static int a6xx_gmu_wait_for_lowest_idle(struct adreno_device *adreno_dev)
	unsigned long t;
	uint64_t ts1, ts2, ts3;

	if (!gmu_core_isenabled(device))
	if (!gmu_core_gpmu_isenabled(device))
		return 0;

	ts1 = read_AO_counter(device);
@@ -1076,10 +1082,6 @@ static int a6xx_gmu_load_firmware(struct kgsl_device *device)
	const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
	int ret =  -EINVAL;

	/* there is no GMU */
	if (!gmu_core_isenabled(device))
		return 0;

	/* GMU fw already saved and verified so do nothing new */
	if (gmu->fw_image)
		return 0;
@@ -1395,7 +1397,7 @@ static int a6xx_gmu_ifpc_store(struct adreno_device *adreno_dev,
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	unsigned int requested_idle_level;

	if (!gmu_core_isenabled(device) ||
	if (!gmu_core_gpmu_isenabled(device) ||
			!ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
		return -EINVAL;

@@ -1429,7 +1431,8 @@ static unsigned int a6xx_gmu_ifpc_show(struct adreno_device *adreno_dev)
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);

	return gmu_core_isenabled(device) && gmu->idle_level  >= GPU_HW_IFPC;
	return gmu_core_gpmu_isenabled(device) &&
			gmu->idle_level  >= GPU_HW_IFPC;
}

struct gmu_mem_type_desc {
@@ -1518,7 +1521,7 @@ static int a6xx_gmu_wait_for_active_transition(
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);

	if (!gmu_core_isenabled(device))
	if (!gmu_core_gpmu_isenabled(device))
		return 0;

	gmu_core_regread(device,
+3 −3
Original line number Diff line number Diff line
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -318,7 +318,7 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev)
	 * free when the GPU is already powered on, whereas an OOB requires an
	 * unconditional handshake with the GMU.
	 */
	if (gmu_core_isenabled(device))
	if (gmu_core_gpmu_isenabled(device))
		gmu_core_regrmw(device, A6XX_GMU_AO_SPARE_CNTL, 0x0, 0x2);

	/*
@@ -422,7 +422,7 @@ void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit)
	 * We can now safely clear the preemption keepalive bit, allowing
	 * power collapse to resume its regular activity.
	 */
	if (gmu_core_isenabled(KGSL_DEVICE(adreno_dev)))
	if (gmu_core_gpmu_isenabled(KGSL_DEVICE(adreno_dev)))
		gmu_core_regrmw(KGSL_DEVICE(adreno_dev),
				A6XX_GMU_AO_SPARE_CNTL, 0x2, 0x0);

+7 −9
Original line number Diff line number Diff line
@@ -114,7 +114,7 @@ static int a6xx_rgmu_oob_set(struct adreno_device *adreno_dev,
	struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device);
	int ret, set, check;

	if (!gmu_core_isenabled(device))
	if (!gmu_core_gpmu_isenabled(device))
		return 0;

	set = BIT(req + 16);
@@ -153,7 +153,7 @@ static inline void a6xx_rgmu_oob_clear(struct adreno_device *adreno_dev,
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!gmu_core_isenabled(device))
	if (!gmu_core_gpmu_isenabled(device))
		return;

	gmu_core_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, BIT(req + 24));
@@ -218,7 +218,7 @@ static int a6xx_rgmu_ifpc_store(struct adreno_device *adreno_dev,
	struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device);
	unsigned int requested_idle_level;

	if (!gmu_core_isenabled(device) ||
	if (!gmu_core_gpmu_isenabled(device) ||
		!ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
		return -EINVAL;

@@ -247,7 +247,8 @@ static unsigned int a6xx_rgmu_ifpc_show(struct adreno_device *adreno_dev)
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device);

	return gmu_core_isenabled(device) && rgmu->idle_level == GPU_HW_IFPC;
	return gmu_core_gpmu_isenabled(device) &&
			rgmu->idle_level == GPU_HW_IFPC;
}


@@ -282,7 +283,7 @@ static int a6xx_rgmu_wait_for_lowest_idle(struct adreno_device *adreno_dev)
	unsigned long t;
	uint64_t ts1, ts2, ts3;

	if (!gmu_core_isenabled(device) ||
	if (!gmu_core_gpmu_isenabled(device) ||
			rgmu->idle_level != GPU_HW_IFPC)
		return 0;

@@ -486,7 +487,7 @@ static int a6xx_rgmu_gpu_pwrctrl(struct adreno_device *adreno_dev,
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret = 0;

	if (!gmu_core_isenabled(device))
	if (!gmu_core_gpmu_isenabled(device))
		return 0;

	switch (mode) {
@@ -526,9 +527,6 @@ static int a6xx_rgmu_load_firmware(struct kgsl_device *device)
	const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
	int ret;

	if (!gmu_core_isenabled(device))
		return 0;

	/* RGMU fw already saved and verified so do nothing new */
	if (rgmu->fw_hostptr)
		return 0;
Loading