Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 90da593e authored by Harshdeep Dhatt
Browse files

msm: kgsl: Create a6xx gmu power ops



Instead of overloading the legacy power up/down sequences,
create a6xx gmu specific power up/down sequences. The
gpudev power ops will be set during a6xx gmu probe so that
we don't have to do gmu checks in legacy probe.

Change-Id: I897d775bdc3c9f97d38d20bc39b56f2551703bce
Signed-off-by: Harshdeep Dhatt <hdhatt@codeaurora.org>
parent 251880f8
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -33,9 +33,11 @@ msm_kgsl-y += \
	adreno_a5xx_snapshot.o \
	adreno_a6xx.o \
	adreno_a6xx_gmu.o \
	adreno_a6xx_gmu_snapshot.o \
	adreno_a6xx_hfi.o \
	adreno_a6xx_preempt.o \
	adreno_a6xx_rgmu.o \
	adreno_a6xx_rpmh.o \
	adreno_a6xx_snapshot.o \
	adreno_cp_parser.o \
	adreno_dispatch.o \
+11 −11
Original line number Diff line number Diff line
@@ -782,7 +782,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
		.features = ADRENO_RPMH | ADRENO_IFPC |
			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION |
			ADRENO_IOCOHERENT | ADRENO_PREEMPTION,
		.gpudev = &adreno_a6xx_gpudev,
		.gpudev = &adreno_a6xx_gmu_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = SZ_1M,
		.bus_width = 32,
@@ -881,7 +881,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a615 = {
		.features = ADRENO_RPMH | ADRENO_PREEMPTION |
			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC |
			ADRENO_IOCOHERENT,
		.gpudev = &adreno_a6xx_gpudev,
		.gpudev = &adreno_a6xx_gmu_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = SZ_512K,
		.bus_width = 32,
@@ -908,7 +908,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
		.features = ADRENO_RPMH | ADRENO_PREEMPTION |
			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC |
			ADRENO_IOCOHERENT,
		.gpudev = &adreno_a6xx_gpudev,
		.gpudev = &adreno_a6xx_gmu_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = SZ_512K,
		.bus_width = 32,
@@ -935,7 +935,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a619 = {
		.features = ADRENO_RPMH | ADRENO_PREEMPTION |
			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC |
			ADRENO_IOCOHERENT,
		.gpudev = &adreno_a6xx_gpudev,
		.gpudev = &adreno_a6xx_gmu_gpudev,
		.gmem_size = SZ_512K,
		.bus_width = 32,
	},
@@ -1080,7 +1080,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
			ADRENO_CONTENT_PROTECTION | ADRENO_IOCOHERENT |
			ADRENO_IFPC | ADRENO_PREEMPTION | ADRENO_ACD |
			ADRENO_APRIV,
		.gpudev = &adreno_a6xx_gpudev,
		.gpudev = &adreno_a6xx_gmu_gpudev,
		.gmem_base = 0,
		.gmem_size = SZ_512K,
		.bus_width = 32,
@@ -1170,7 +1170,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a640 = {
		.features = ADRENO_RPMH | ADRENO_GPMU |
			ADRENO_CONTENT_PROTECTION | ADRENO_IOCOHERENT |
			ADRENO_IFPC | ADRENO_PREEMPTION,
		.gpudev = &adreno_a6xx_gpudev,
		.gpudev = &adreno_a6xx_gmu_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = SZ_1M, //Verified 1MB
		.bus_width = 32,
@@ -1250,7 +1250,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650 = {
		.features = ADRENO_RPMH | ADRENO_GPMU |
			ADRENO_IOCOHERENT | ADRENO_CONTENT_PROTECTION |
			ADRENO_IFPC | ADRENO_APRIV,
		.gpudev = &adreno_a6xx_gpudev,
		.gpudev = &adreno_a6xx_gmu_gpudev,
		.gmem_base = 0,
		.gmem_size = SZ_1M + SZ_128K, /* verified 1152kB */
		.bus_width = 32,
@@ -1279,7 +1279,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650v2 = {
			ADRENO_IOCOHERENT | ADRENO_CONTENT_PROTECTION |
			ADRENO_IFPC | ADRENO_PREEMPTION | ADRENO_ACD |
			ADRENO_LM | ADRENO_APRIV,
		.gpudev = &adreno_a6xx_gpudev,
		.gpudev = &adreno_a6xx_gmu_gpudev,
		.gmem_base = 0,
		.gmem_size = SZ_1M + SZ_128K, /* verified 1152kB */
		.bus_width = 32,
@@ -1305,7 +1305,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
	.base = {
		DEFINE_ADRENO_REV(ADRENO_REV_A680, 6, 8, 0, ANY_ID),
		.features = ADRENO_RPMH | ADRENO_GPMU,
		.gpudev = &adreno_a6xx_gpudev,
		.gpudev = &adreno_a6xx_gmu_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = SZ_2M,
		.bus_width = 32,
@@ -1407,7 +1407,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
		.features = ADRENO_RPMH | ADRENO_PREEMPTION |
			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC |
			ADRENO_IOCOHERENT,
		.gpudev = &adreno_a6xx_gpudev,
		.gpudev = &adreno_a6xx_gmu_gpudev,
		.gmem_base = 0x100000,
		.gmem_size = SZ_512K,
		.bus_width = 32,
@@ -1548,7 +1548,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a660 = {
		.features = ADRENO_RPMH | ADRENO_GPMU | ADRENO_APRIV |
				ADRENO_IOCOHERENT | ADRENO_CONTENT_PROTECTION |
				ADRENO_IFPC,
		.gpudev = &adreno_a6xx_gpudev,
		.gpudev = &adreno_a6xx_gmu_gpudev,
		.gmem_base = 0,
		.gmem_size = SZ_1M + SZ_512K,
		.bus_width = 32,
+1 −25
Original line number Diff line number Diff line
@@ -321,30 +321,6 @@ void adreno_fault_detect_stop(struct adreno_device *adreno_dev)
	adreno_dev->fast_hang_detect = 0;
}

/* Bit in GMU_CM3_CFG that raises a non-maskable interrupt on the GMU CM3 */
#define GMU_CM3_CFG_NONMASKINTR_SHIFT	9

/*
 * adreno_gmu_send_nmi() - Send an NMI to the GMU
 * @adreno_dev: Adreno GPU device handle
 *
 * Masks all GMU-to-host interrupts, then sets the non-maskable interrupt
 * bit in GMU_CM3_CFG so the GMU firmware takes an NMI (typically to force
 * it to dump state). Write barriers enforce the register ordering; do not
 * reorder these accesses.
 */
void adreno_gmu_send_nmi(struct adreno_device *adreno_dev)
{
	u32 val;
	/* Mask so there's no interrupt caused by NMI */
	adreno_write_gmureg(adreno_dev,
			ADRENO_REG_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF);

	/* Make sure the interrupt is masked before causing it */
	wmb();
	/* Clear NMI status first on targets that do HFI through registers */
	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
		adreno_write_gmureg(adreno_dev,
				ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0);

	/* Read-modify-write CM3_CFG to assert the NMI bit */
	adreno_read_gmureg(adreno_dev, ADRENO_REG_GMU_CM3_CFG, &val);
	val |= 1 << GMU_CM3_CFG_NONMASKINTR_SHIFT;
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_CM3_CFG, val);

	/* Make sure the NMI is invoked before we proceed */
	wmb();
}

/*
 * A workqueue callback responsible for actually turning on the GPU after a
 * touch event. kgsl_pwrctrl_change_state(ACTIVE) is used without any
@@ -1831,7 +1807,7 @@ int adreno_switch_to_unsecure_mode(struct adreno_device *adreno_dev,
	return ret;
}

static void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
{
	int i;
	struct adreno_ringbuffer *rb;
+10 −1
Original line number Diff line number Diff line
@@ -905,6 +905,7 @@ extern unsigned int *adreno_ft_regs_val;
extern struct adreno_gpudev adreno_a3xx_gpudev;
extern struct adreno_gpudev adreno_a5xx_gpudev;
extern struct adreno_gpudev adreno_a6xx_gpudev;
extern struct adreno_gpudev adreno_a6xx_gmu_gpudev;

extern int adreno_wake_nice;
extern unsigned int adreno_wake_timeout;
@@ -1758,7 +1759,6 @@ int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
	enum adreno_regs offset, unsigned int val,
	unsigned int fence_mask);
int adreno_clear_pending_transactions(struct kgsl_device *device);
void adreno_gmu_send_nmi(struct adreno_device *adreno_dev);


/**
@@ -1858,6 +1858,15 @@ int adreno_power_cycle_bool(struct adreno_device *adreno_dev,
int adreno_power_cycle_u32(struct adreno_device *adreno_dev,
	u32 *flag, u32 val);

/**
 * adreno_set_active_ctxs_null - Give up active context refcount
 * @adreno_dev: Adreno GPU device handle
 *
 * This puts back the reference for that last active context on
 * each ringbuffer when going in and out of slumber.
 */
void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev);

/**
 * adreno_get_bus_counters - Allocate the bus dcvs counters
 * @adreno_dev: Adreno GPU device handle
+63 −28
Original line number Diff line number Diff line
@@ -119,7 +119,7 @@ static u32 a615_pwrup_reglist[] = {

static int a6xx_get_cp_init_cmds(struct adreno_device *adreno_dev);

static int a6xx_init(struct adreno_device *adreno_dev)
int a6xx_init(struct adreno_device *adreno_dev)
{
	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);

@@ -400,13 +400,7 @@ static void a6xx_set_secvid(struct kgsl_device *device)
#define A6XX_APRIV_DEFAULT \
	((1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1))

/*
 * a6xx_start() - Device start
 * @adreno_dev: Pointer to adreno device
 *
 * a6xx device start
 */
static void a6xx_start(struct adreno_device *adreno_dev)
void a6xx_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
@@ -860,11 +854,7 @@ static int a6xx_post_start(struct adreno_device *adreno_dev)
	return ret;
}

/*
 * a6xx_rb_start() - Start the ringbuffer
 * @adreno_dev: Pointer to adreno device
 */
static int a6xx_rb_start(struct adreno_device *adreno_dev)
int a6xx_rb_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_ringbuffer *rb;
@@ -1030,11 +1020,7 @@ static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
	return (reg & BIT(23)) ? false : true;
}

/*
 * a6xx_microcode_read() - Read microcode
 * @adreno_dev: Pointer to adreno device
 */
static int a6xx_microcode_read(struct adreno_device *adreno_dev)
int a6xx_microcode_read(struct adreno_device *adreno_dev)
{
	struct adreno_firmware *sqe_fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
@@ -2332,17 +2318,11 @@ static struct adreno_perfcounters a6xx_perfcounters = {
	ARRAY_SIZE(a6xx_perfcounter_groups),
};

static int a6xx_probe(struct platform_device *pdev,
		u32 chipid, const struct adreno_gpu_core *gpucore)
int a6xx_probe_common(struct platform_device *pdev,
	struct	adreno_device *adreno_dev, u32 chipid,
	const struct adreno_gpu_core *gpucore)
{
	struct adreno_device *adreno_dev;
	struct adreno_gpudev *gpudev = gpucore->gpudev;
	struct kgsl_device *device;

	adreno_dev = (struct adreno_device *)
		of_device_get_match_data(&pdev->dev);

	memset(adreno_dev, 0, sizeof(*adreno_dev));

	adreno_dev->gpucore = gpucore;
	adreno_dev->chipid = chipid;
@@ -2385,13 +2365,33 @@ static int a6xx_probe(struct platform_device *pdev,
		adreno_dev->perfctr_ifpc_lo =
			A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L;


	return adreno_device_probe(pdev, adreno_dev);
}

/*
 * a6xx_probe() - Probe an a6xx GPU target
 * @pdev: Platform device handle
 * @chipid: Chip id of the target
 * @gpucore: GPU core table entry for the target
 *
 * Fetches the per-target adreno_device from the OF match data, runs the
 * shared a6xx setup (which registers the device via adreno_device_probe()),
 * then installs the non-GMU idle timer and idle-check work.
 *
 * Return: 0 on success or a negative error code from a6xx_probe_common().
 */
static int a6xx_probe(struct platform_device *pdev,
		u32 chipid, const struct adreno_gpu_core *gpucore)
{
	struct adreno_device *adreno_dev;
	struct kgsl_device *device;
	int ret;

	adreno_dev = (struct adreno_device *)
		of_device_get_match_data(&pdev->dev);

	memset(adreno_dev, 0, sizeof(*adreno_dev));

	/*
	 * a6xx_probe_common() already calls adreno_device_probe(); do not
	 * call it again here or the device would be registered twice.
	 */
	ret = a6xx_probe_common(pdev, adreno_dev, chipid, gpucore);
	if (ret)
		return ret;

	device = KGSL_DEVICE(adreno_dev);

	timer_setup(&device->idle_timer, kgsl_timer, 0);

	INIT_WORK(&device->idle_check_ws, kgsl_idle_check);

	return 0;
}


@@ -2685,3 +2685,38 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
	.read_alwayson = a6xx_read_alwayson,
	.power_ops = &adreno_power_operations,
};

/*
 * GPU device operations for a6xx targets that have a GMU. This table
 * mirrors adreno_a6xx_gpudev except that it probes through
 * a6xx_gmu_device_probe and uses the GMU-specific a6xx_gmu_power_ops
 * instead of the legacy adreno_power_operations, so GMU checks are not
 * needed in the legacy power paths.
 */
struct adreno_gpudev adreno_a6xx_gmu_gpudev = {
	.reg_offsets = a6xx_register_offsets,
	/* GMU-specific probe: sets up the GMU before device registration */
	.probe = a6xx_gmu_device_probe,
	.start = a6xx_start,
	.snapshot = a6xx_snapshot,
	.init = a6xx_init,
	.irq_handler = a6xx_irq_handler,
	.rb_start = a6xx_rb_start,
	.regulator_enable = a6xx_sptprac_enable,
	.regulator_disable = a6xx_sptprac_disable,
	.perfcounters = &a6xx_perfcounters,
	.read_throttling_counters = a6xx_read_throttling_counters,
	.microcode_read = a6xx_microcode_read,
	.gpu_keepalive = a6xx_gpu_keepalive,
	.hw_isidle = a6xx_hw_isidle,
	.iommu_fault_block = a6xx_iommu_fault_block,
	.reset = a6xx_reset,
	.preemption_pre_ibsubmit = a6xx_preemption_pre_ibsubmit,
	.preemption_post_ibsubmit = a6xx_preemption_post_ibsubmit,
	.preemption_init = a6xx_preemption_init,
	.preemption_schedule = a6xx_preemption_schedule,
	.set_marker = a6xx_set_marker,
	.preemption_context_init = a6xx_preemption_context_init,
	.preemption_context_destroy = a6xx_preemption_context_destroy,
	.sptprac_is_on = a6xx_sptprac_is_on,
	.ccu_invalidate = a6xx_ccu_invalidate,
	.perfcounter_update = a6xx_perfcounter_update,
#ifdef CONFIG_QCOM_KGSL_CORESIGHT
	.coresight = {&a6xx_coresight, &a6xx_coresight_cx},
#endif
	.clk_set_options = a6xx_clk_set_options,
	.read_alwayson = a6xx_read_alwayson,
	/* GMU power sequences replace adreno_power_operations */
	.power_ops = &a6xx_gmu_power_ops,
};
Loading