Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b0de8bff authored by Jordan Crouse's avatar Jordan Crouse
Browse files

msm: kgsl: Move always on timer reads to target specific code



Instead of using adreno_readreg and a bunch of if statements, move the
always on timer functionality to the target specific code where it
belongs. This lets us do local variations without a bunch of hacks.

Change-Id: Ic0dedbad91b6a46f55c813f83360cb205bf5e357
Signed-off-by: default avatarJordan Crouse <jcrouse@codeaurora.org>
parent ce6cda63
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -663,8 +663,6 @@ enum adreno_regs {
	ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
	ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
	ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
	ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
	ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
	ADRENO_REG_RBBM_SECVID_TRUST_CONFIG,
	ADRENO_REG_RBBM_SECVID_TSB_CONTROL,
	ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
@@ -827,6 +825,8 @@ struct adreno_gpudev {
	int (*perfcounter_update)(struct adreno_device *adreno_dev,
				struct adreno_perfcount_register *reg,
				bool update_reg);
	/** @read_alwayson: Return the current value of the alwayson counter */
	u64 (*read_alwayson)(struct adreno_device *adreno_dev);
};

/**
+7 −0
Original line number Diff line number Diff line
@@ -1431,6 +1431,12 @@ static void a3xx_clk_set_options(struct adreno_device *adreno_dev,
	}
}

/* A3XX targets lack an always on timer, so report a constant zero */
static u64 a3xx_read_alwayson(struct adreno_device *adreno_dev)
{
	return 0;
}

struct adreno_gpudev adreno_a3xx_gpudev = {
	.reg_offsets = a3xx_register_offsets,
	.int_bits = a3xx_int_bits,
@@ -1451,4 +1457,5 @@ struct adreno_gpudev adreno_a3xx_gpudev = {
	.coresight = {&a3xx_coresight},
#endif
	.clk_set_options = a3xx_clk_set_options,
	.read_alwayson = a3xx_read_alwayson,
};
+15 −4
Original line number Diff line number Diff line
@@ -2506,10 +2506,6 @@ static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
				A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
				A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
				A5XX_RBBM_ALWAYSON_COUNTER_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
				A5XX_RBBM_ALWAYSON_COUNTER_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL0,
				A5XX_VBIF_XIN_HALT_CTRL0),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
@@ -2789,6 +2785,20 @@ static void a5x_gpc_err_int_callback(struct adreno_device *adreno_dev, int bit)
	adreno_dispatcher_schedule(device);
}

static u64 a5xx_read_alwayson(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	u32 lo = 0, hi = 0;

	kgsl_regread(device, A5XX_RBBM_ALWAYSON_COUNTER_LO, &lo);

	/* The upper 32 bits are only reliable on A540 targets */
	if (adreno_is_a540(adreno_dev))
		kgsl_regread(device, A5XX_RBBM_ALWAYSON_COUNTER_HI, &hi);

	return (((u64) hi) << 32) | lo;
}

#define A5XX_INT_MASK \
	((1 << A5XX_INT_RBBM_AHB_ERROR) |		\
	 (1 << A5XX_INT_RBBM_TRANSFER_TIMEOUT) |		\
@@ -3087,4 +3097,5 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
	.preemption_close = a5xx_preemption_close,
	.preemption_schedule = a5xx_preemption_schedule,
	.clk_set_options = a5xx_clk_set_options,
	.read_alwayson = a5xx_read_alwayson,
};
+27 −22
Original line number Diff line number Diff line
@@ -102,17 +102,6 @@ static u32 a612_pwrup_reglist[] = {
	A6XX_RBBM_PERFCTR_CNTL,
};

/* Redirect the always on counter offsets to the CP copies of the counter */
static void _update_always_on_regs(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int *const offsets = gpudev->reg_offsets;

	offsets[ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO] =
		A6XX_CP_ALWAYS_ON_COUNTER_LO;
	offsets[ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI] =
		A6XX_CP_ALWAYS_ON_COUNTER_HI;
}

static void a6xx_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -134,13 +123,6 @@ static void a6xx_init(struct adreno_device *adreno_dev)

	a6xx_crashdump_init(adreno_dev);

	/*
	 * If the GMU is not enabled, rewrite the offset for the always on
	 * counters to point to the CP always on instead of GMU always on
	 */
	if (!gmu_core_isenabled(device))
		_update_always_on_regs(adreno_dev);

	kgsl_allocate_global(device, &adreno_dev->pwrup_reglist,
		PAGE_SIZE, 0, KGSL_MEMDESC_CONTIG | KGSL_MEMDESC_PRIVILEGED,
		"powerup_register_list");
@@ -2409,10 +2391,6 @@ static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
				A6XX_RBBM_GBIF_HALT_ACK),
	ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT, A6XX_GBIF_HALT),
	ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT_ACK, A6XX_GBIF_HALT_ACK),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
				A6XX_GMU_ALWAYS_ON_COUNTER_L),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
				A6XX_GMU_ALWAYS_ON_COUNTER_H),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_AHB_FENCE_CTRL,
				A6XX_GMU_AO_AHB_FENCE_CTRL),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_INTERRUPT_EN,
@@ -2585,6 +2563,32 @@ static void a6xx_clk_set_options(struct adreno_device *adreno_dev,
	}
}

/*
 * Return the current 64-bit always on counter value. With a GMU the AO
 * counter is read over AHB with a high/low/high sequence to catch a low
 * word rollover; without one, fall back to the CP copy of the counter.
 */
u64 a6xx_read_alwayson(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	u32 lo = 0, hi = 0, hi_again = 0;

	if (!gmu_core_isenabled(device)) {
		/* No GMU: read the CP always on counter instead */
		kgsl_regread(device, A6XX_CP_ALWAYS_ON_COUNTER_LO, &lo);
		kgsl_regread(device, A6XX_CP_ALWAYS_ON_COUNTER_HI, &hi);
		return ((u64) hi << 32) | lo;
	}

	/* Always use the GMU AO counter when doing a AHB read */
	gmu_core_regread(device, A6XX_GMU_ALWAYS_ON_COUNTER_H, &hi);
	gmu_core_regread(device, A6XX_GMU_ALWAYS_ON_COUNTER_L, &lo);

	/* Re-read the high word; a change means the low word rolled over */
	gmu_core_regread(device, A6XX_GMU_ALWAYS_ON_COUNTER_H, &hi_again);

	if (hi_again != hi) {
		gmu_core_regread(device, A6XX_GMU_ALWAYS_ON_COUNTER_L, &lo);
		hi = hi_again;
	}

	return ((u64) hi << 32) | lo;
}

struct adreno_gpudev adreno_a6xx_gpudev = {
	.reg_offsets = a6xx_register_offsets,
	.start = a6xx_start,
@@ -2620,4 +2624,5 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
	.coresight = {&a6xx_coresight, &a6xx_coresight_cx},
#endif
	.clk_set_options = a6xx_clk_set_options,
	.read_alwayson = a6xx_read_alwayson,
};
+8 −25
Original line number Diff line number Diff line
@@ -244,31 +244,6 @@ static inline int timed_poll_check_rscc(struct kgsl_device *device,
	return -ETIMEDOUT;
}

/*
 * read_AO_counter() - Returns the 64bit always on counter value
 *
 * @device: Pointer to KGSL device
 */
static inline uint64_t read_AO_counter(struct kgsl_device *device)
{
	unsigned int low, high, high_check;

	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &high);
	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &low);

	/*
	 * Sample the high word a second time: if it moved, the low word
	 * rolled over between the two reads and must be sampled again
	 */
	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H,
		&high_check);

	if (high != high_check) {
		gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L,
			&low);
		high = high_check;
	}

	return ((uint64_t) high << 32) | (uint64_t) low;
}

/* Preemption functions */
void a6xx_preemption_trigger(struct adreno_device *adreno_dev);
void a6xx_preemption_schedule(struct adreno_device *adreno_dev);
@@ -297,4 +272,12 @@ void a6xx_crashdump_init(struct adreno_device *adreno_dev);
int a6xx_gmu_sptprac_enable(struct adreno_device *adreno_dev);
void a6xx_gmu_sptprac_disable(struct adreno_device *adreno_dev);
bool a6xx_gmu_sptprac_is_on(struct adreno_device *adreno_dev);

/**
 * a6xx_read_alwayson - Read the current always on counter value
 * @adreno_dev: An Adreno GPU handle
 *
 * Return: The current value of the always on counter (the GMU AO counter
 * when the GMU is enabled, otherwise the CP copy of the counter)
 */
u64 a6xx_read_alwayson(struct adreno_device *adreno_dev);
#endif
Loading