
Commit efa44eb2 authored by Harshdeep Dhatt

msm: kgsl: Pare down the gmu core and dev ops



Remove the ops that we no longer need. Also remove KGSL_STATE_RESET,
as it was only needed for gmu recovery, which is now completely
target specific. Also, remove the unused adreno gmu register defines.

Change-Id: I20b8bc04714e30cd7418d81e8634e4c5079fa175
Signed-off-by: Harshdeep Dhatt <hdhatt@codeaurora.org>
parent d2645d62
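
For readers outside the driver: the "dev ops" being pared down are function pointers on struct gmu_dev_ops through which generic KGSL code used to call into target-specific GMU code (gmu_core_dev_oob_set, gmu_core_dev_hfi_start_msg, gmu_core_dev_read_alwayson, and so on). The diffs below move each remaining call site into the a6xx files, where the target implementation can be called directly and the hook deleted from the table. A minimal standalone sketch of that refactor pattern follows; the demo_* names are invented for illustration and are not kernel code.

/* Sketch only: demo_* names are hypothetical; the real gmu_dev_ops
 * carries many more hooks than shown here. */
#include <stdio.h>

struct demo_gmu_dev_ops {
	/* Indirection used when generic code must reach target code. */
	int (*oob_set)(unsigned int req);
};

/* Target-specific (a6xx-style) implementation. */
static int demo_a6xx_oob_set(unsigned int req)
{
	printf("a6xx: set OOB bit %u\n", req);
	return 0;
}

static const struct demo_gmu_dev_ops demo_a6xx_ops = {
	.oob_set = demo_a6xx_oob_set,
};

int main(void)
{
	/* Before: generic code dispatches through the ops table, so the
	 * hook must exist even if only one target ever uses it. */
	demo_a6xx_ops.oob_set(1);

	/* After: once the call site lives in target code, it calls the
	 * implementation directly and the hook is deleted from the table,
	 * which is what this commit does for oob_set, hfi_start_msg,
	 * read_alwayson and friends. */
	demo_a6xx_oob_set(1);
	return 0;
}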
+21 −137
@@ -1646,8 +1646,6 @@ int adreno_clear_pending_transactions(struct kgsl_device *device)
 
 	if (adreno_has_gbif(adreno_dev)) {
 
-		/* This is taken care by GMU firmware if GMU is enabled */
-		if (!gmu_core_gpmu_isenabled(device)) {
 		/* Halt GBIF GX traffic and poll for halt ack */
 		if (adreno_is_a615_family(adreno_dev)) {
 			adreno_writereg(adreno_dev,
@@ -1664,9 +1662,9 @@ int adreno_clear_pending_transactions(struct kgsl_device *device)
 				ADRENO_REG_RBBM_GBIF_HALT_ACK,
 				gpudev->gbif_gx_halt_mask);
 		}
+
 		if (ret)
 			return ret;
-		}
 
 		/* Halt new client requests */
 		adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT,
@@ -1734,10 +1732,6 @@ static int adreno_init(struct kgsl_device *device)
 			return ret;
 	}
 
-	ret = gmu_core_init(device);
-	if (ret)
-		return ret;
-
 	/* Put the GPU in a responsive state */
 	if (ADRENO_GPUREV(adreno_dev) < 600) {
 		/* No need for newer generation architectures */
@@ -2120,20 +2114,7 @@ static int _adreno_start(struct adreno_device *adreno_dev)
 
 	status = kgsl_mmu_start(device);
 	if (status)
-		goto error_boot_oob_clear;
-
-	/* Send OOB request to turn on the GX */
-	status = gmu_core_dev_oob_set(device, oob_gpu);
-	if (status) {
-		gmu_core_snapshot(device);
-		goto error_oob_clear;
-	}
-
-	status = gmu_core_dev_hfi_start_msg(device);
-	if (status) {
-		gmu_core_snapshot(device);
-		goto error_oob_clear;
-	}
+		goto error_pwr_off;
 
 	adreno_get_bus_counters(adreno_dev);
 
@@ -2157,7 +2138,7 @@ static int _adreno_start(struct adreno_device *adreno_dev)
 
 	status = gpudev->rb_start(adreno_dev);
 	if (status)
-		goto error_oob_clear;
+		goto error_pwr_off;
 
 	/*
 	 * At this point it is safe to assume that we recovered. Setting
@@ -2174,22 +2155,8 @@ static int _adreno_start(struct adreno_device *adreno_dev)
 
 	set_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
 
-	/* Send OOB request to allow IFPC */
-	gmu_core_dev_oob_clear(device, oob_gpu);
-
-	/* If we made it this far, the BOOT OOB was sent to the GMU */
-	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
-		gmu_core_dev_oob_clear(device, oob_boot_slumber);
-
 	return 0;
 
-error_oob_clear:
-	gmu_core_dev_oob_clear(device, oob_gpu);
-
-error_boot_oob_clear:
-	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
-		gmu_core_dev_oob_clear(device, oob_boot_slumber);
-
 error_pwr_off:
 	/* set the state back to original state */
 	kgsl_pwrctrl_change_state(device, state);
@@ -2231,14 +2198,6 @@ static int adreno_stop(struct kgsl_device *device)
 	if (!test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv))
 		return 0;
 
-	error = gmu_core_dev_oob_set(device, oob_gpu);
-	if (error) {
-		gmu_core_dev_oob_clear(device, oob_gpu);
-		gmu_core_snapshot(device);
-		error = -EINVAL;
-		goto no_gx_power;
-	}
-
 	kgsl_pwrscale_update_stats(device);
 
 	adreno_irqctrl(adreno_dev, 0);
@@ -2249,24 +2208,8 @@ static int adreno_stop(struct kgsl_device *device)
 	/* Save physical performance counter values before GPU power down*/
 	adreno_perfcounter_save(adreno_dev);
 
-	gmu_core_dev_prepare_stop(device);
-	gmu_core_dev_oob_clear(device, oob_gpu);
-
-	/*
-	 * Saving perfcounters will use an OOB to put the GMU into
-	 * active state. Before continuing, we should wait for the
-	 * GMU to return to the lowest idle level. This is
-	 * because some idle level transitions require VBIF and MMU.
-	 */
-
-	if (!error && gmu_core_dev_wait_for_lowest_idle(device)) {
-		gmu_core_snapshot(device);
-		error = -EINVAL;
-	}
-
 	adreno_clear_pending_transactions(device);
 
-no_gx_power:
 	adreno_dispatcher_stop(adreno_dev);
 
 	adreno_ringbuffer_stop(adreno_dev);
@@ -2277,16 +2220,6 @@ static int adreno_stop(struct kgsl_device *device)
 	if (!IS_ERR_OR_NULL(adreno_dev->gpuhtw_llc_slice))
 		llcc_slice_deactivate(adreno_dev->gpuhtw_llc_slice);
 
-	/*
-	 * The halt is not cleared in the above function if we have GBIF.
-	 * Clear it here if GMU is enabled as GMU stop needs access to
-	 * system memory to stop. For non-GMU targets, we don't need to
-	 * clear it as it will get cleared automatically once headswitch
-	 * goes OFF immediately after adreno_stop.
-	 */
-	if (gmu_core_gpmu_isenabled(device))
-		adreno_deassert_gbif_halt(adreno_dev);
-
 	adreno_set_active_ctxs_null(adreno_dev);
 
 	clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
@@ -2887,14 +2820,6 @@ void adreno_spin_idle_debug(struct adreno_device *adreno_dev,
 
 	dev_err(device->dev, " hwfault=%8.8X\n", hwfault);
 
-	/*
-	 * If CP is stuck, gmu may not perform as expected. So force a gmu
-	 * snapshot which captures entire state as well as sets the gmu fault
-	 * because things need to be reset anyway.
-	 */
-	if (gmu_core_isenabled(device))
-		gmu_core_snapshot(device);
-	else
 	kgsl_device_snapshot(device, NULL, false);
 }
 
@@ -3090,47 +3015,6 @@ static void adreno_regwrite(struct kgsl_device *device,
 	__raw_writel(value, reg);
 }
 
-/**
- * adreno_gmu_clear_and_unmask_irqs() - Clear pending IRQs and Unmask IRQs
- * @adreno_dev: Pointer to the Adreno device that owns the GMU
- */
-void adreno_gmu_clear_and_unmask_irqs(struct adreno_device *adreno_dev)
-{
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device);
-
-	/* Clear any pending IRQs before unmasking on GMU */
-	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
-			0xFFFFFFFF);
-	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
-			0xFFFFFFFF);
-
-	/* Unmask needed IRQs on GMU */
-	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
-			(unsigned int) ~(gmu_dev_ops->gmu2host_intr_mask));
-	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
-			(unsigned int) ~(gmu_dev_ops->gmu_ao_intr_mask));
-}
-
-/**
- * adreno_gmu_mask_and_clear_irqs() - Mask all IRQs and clear pending IRQs
- * @adreno_dev: Pointer to the Adreno device that owns the GMU
- */
-void adreno_gmu_mask_and_clear_irqs(struct adreno_device *adreno_dev)
-{
-	/* Mask all IRQs on GMU */
-	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
-			0xFFFFFFFF);
-	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
-			0xFFFFFFFF);
-
-	/* Clear any pending IRQs before disabling */
-	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
-			0xFFFFFFFF);
-	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
-			0xFFFFFFFF);
-}
-
 /*
  * adreno_gmu_fenced_write() - Check if there is a GMU and it is enabled
  * @adreno_dev: Pointer to the Adreno device that owns the GMU
+0 −17
@@ -709,23 +709,9 @@ enum adreno_regs {
 	ADRENO_REG_VBIF_VERSION,
 	ADRENO_REG_GBIF_HALT,
 	ADRENO_REG_GBIF_HALT_ACK,
-	ADRENO_REG_GMU_AO_INTERRUPT_EN,
-	ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
 	ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
-	ADRENO_REG_GMU_PWR_COL_KEEPALIVE,
 	ADRENO_REG_GMU_AHB_FENCE_STATUS,
-	ADRENO_REG_GMU_RPMH_POWER_STATE,
-	ADRENO_REG_GMU_HFI_CTRL_STATUS,
-	ADRENO_REG_GMU_HFI_VERSION_INFO,
-	ADRENO_REG_GMU_HFI_SFR_ADDR,
-	ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
-	ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
 	ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
-	ADRENO_REG_GMU_HOST2GMU_INTR_SET,
-	ADRENO_REG_GMU_HOST2GMU_INTR_CLR,
-	ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
-	ADRENO_REG_GMU_NMI_CONTROL_STATUS,
-	ADRENO_REG_GMU_CM3_CFG,
 	ADRENO_REG_GPMU_POWER_COUNTER_ENABLE,
 	ADRENO_REG_REGISTER_MAX,
 };
@@ -1762,9 +1748,6 @@ static inline void adreno_reg_offset_init(u32 *reg_offsets)
 	}
 }
 
-
-void adreno_gmu_clear_and_unmask_irqs(struct adreno_device *adreno_dev);
-void adreno_gmu_mask_and_clear_irqs(struct adreno_device *adreno_dev);
 int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
 	enum adreno_regs offset, unsigned int val,
 	unsigned int fence_mask);
+0 −28
@@ -2479,40 +2479,12 @@ static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
 				A6XX_RBBM_GBIF_HALT_ACK),
 	ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT, A6XX_GBIF_HALT),
 	ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT_ACK, A6XX_GBIF_HALT_ACK),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_INTERRUPT_EN,
-				A6XX_GMU_AO_INTERRUPT_EN),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
-				A6XX_GMU_AO_HOST_INTERRUPT_CLR),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
 				A6XX_GMU_AO_HOST_INTERRUPT_MASK),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_PWR_COL_KEEPALIVE,
-				A6XX_GMU_GMU_PWR_COL_KEEPALIVE),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AHB_FENCE_STATUS,
 				A6XX_GMU_AHB_FENCE_STATUS),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_CTRL_STATUS,
-				A6XX_GMU_HFI_CTRL_STATUS),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_VERSION_INFO,
-				A6XX_GMU_HFI_VERSION_INFO),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_SFR_ADDR,
-				A6XX_GMU_HFI_SFR_ADDR),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_RPMH_POWER_STATE,
-				A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
-				A6XX_GMU_GMU2HOST_INTR_CLR),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
-				A6XX_GMU_GMU2HOST_INTR_INFO),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
 				A6XX_GMU_GMU2HOST_INTR_MASK),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_SET,
-				A6XX_GMU_HOST2GMU_INTR_SET),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_CLR,
-				A6XX_GMU_HOST2GMU_INTR_CLR),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
-				A6XX_GMU_HOST2GMU_INTR_RAW_INFO),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_NMI_CONTROL_STATUS,
-				A6XX_GMU_NMI_CONTROL_STATUS),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_CM3_CFG,
-				A6XX_GMU_CM3_CFG),
 };

static int cpu_gpu_lock(struct cpu_gpu_lock *lock)
+16 −10
@@ -779,11 +779,19 @@ static void a6xx_gmu_oob_clear(struct kgsl_device *device,
 
 static void a6xx_gmu_irq_enable(struct adreno_device *adreno_dev)
 {
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
 	struct a6xx_hfi *hfi = &gmu->hfi;
 
 	/* Clear pending IRQs and Unmask needed IRQs */
-	adreno_gmu_clear_and_unmask_irqs(adreno_dev);
+	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, 0xffffffff);
+	gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_CLR, 0xffffffff);
+
+	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_MASK,
+		(unsigned int)~HFI_IRQ_MASK);
+	gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_MASK,
+		(unsigned int)~GMU_AO_INT_MASK);
+
 
 	/* Enable all IRQs on host */
 	enable_irq(hfi->irq);
@@ -792,6 +800,7 @@ static void a6xx_gmu_irq_enable(struct adreno_device *adreno_dev)
 
 static void a6xx_gmu_irq_disable(struct adreno_device *adreno_dev)
 {
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
 	struct a6xx_hfi *hfi = &gmu->hfi;
 
@@ -800,7 +809,12 @@ static void a6xx_gmu_irq_disable(struct adreno_device *adreno_dev)
 	disable_irq(hfi->irq);
 
 	/* Mask all IRQs and clear pending IRQs */
-	adreno_gmu_mask_and_clear_irqs(adreno_dev);
+	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_MASK, 0xffffffff);
+	gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_MASK, 0xffffffff);
+
+	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, 0xffffffff);
+	gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_CLR, 0xffffffff);
+
 }
 
 static int a6xx_gmu_hfi_start_msg(struct adreno_device *adreno_dev)
@@ -1864,11 +1878,6 @@ static bool a6xx_gmu_scales_bandwidth(struct kgsl_device *device)
 	return (ADRENO_GPUREV(adreno_dev) >= ADRENO_REV_A640);
 }
 
-static u64 a6xx_gmu_read_alwayson(struct kgsl_device *device)
-{
-	return a6xx_read_alwayson(ADRENO_DEVICE(device));
-}
-
 static irqreturn_t a6xx_gmu_irq_handler(int irq, void *data)
 {
 	struct kgsl_device *device = data;
@@ -2239,9 +2248,6 @@ static struct gmu_dev_ops a6xx_gmudev = {
 	.snapshot = a6xx_gmu_device_snapshot,
 	.cooperative_reset = a6xx_gmu_cooperative_reset,
 	.wait_for_active_transition = a6xx_gmu_wait_for_active_transition,
-	.read_alwayson = a6xx_gmu_read_alwayson,
-	.gmu2host_intr_mask = HFI_IRQ_MASK,
-	.gmu_ao_intr_mask = GMU_AO_INT_MASK,
 	.scales_bandwidth = a6xx_gmu_scales_bandwidth,
 };
 
+14 −17
@@ -160,8 +160,8 @@ static int a6xx_hfi_queue_write(struct adreno_device *adreno_dev,
 	wmb();
 
 	/* Send interrupt to GMU to receive the message */
-	adreno_write_gmureg(adreno_dev,
-		ADRENO_REG_GMU_HOST2GMU_INTR_SET, 0x1);
+	gmu_core_regwrite(KGSL_DEVICE(adreno_dev), A6XX_GMU_HOST2GMU_INTR_SET,
+		0x1);
 
 	return 0;
 }
@@ -274,29 +274,29 @@ static int receive_ack_cmd(struct a6xx_gmu_device *gmu, void *rcvd,
 #define MSG_HDR_SET_SEQNUM(hdr, num) \
 	(((hdr) & 0xFFFFF) | ((num) << 20))
 
-static int poll_adreno_gmu_reg(struct adreno_device *adreno_dev,
-	enum adreno_regs offset_name, unsigned int expected_val,
+static int poll_gmu_reg(struct adreno_device *adreno_dev,
+	u32 offsetdwords, unsigned int expected_val,
 	unsigned int mask, unsigned int timeout_ms)
 {
 	unsigned int val;
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
 	u64 ao_pre_poll, ao_post_poll;
 
-	ao_pre_poll = gmu_core_dev_read_alwayson(device);
+	ao_pre_poll = a6xx_read_alwayson(adreno_dev);
 
 	while (time_is_after_jiffies(timeout)) {
-		adreno_read_gmureg(adreno_dev, offset_name, &val);
+		gmu_core_regread(device, offsetdwords, &val);
 		if ((val & mask) == expected_val)
 			return 0;
 		usleep_range(10, 100);
 	}
 
-	ao_post_poll = gmu_core_dev_read_alwayson(device);
+	ao_post_poll = a6xx_read_alwayson(adreno_dev);
 
 	/* Check one last time */
-	adreno_read_gmureg(adreno_dev, offset_name, &val);
+	gmu_core_regread(device, offsetdwords, &val);
 	if ((val & mask) == expected_val)
 		return 0;
 
@@ -325,7 +325,7 @@ static int a6xx_hfi_send_cmd(struct adreno_device *adreno_dev,
 	if (rc)
 		return rc;
 
-	rc = poll_adreno_gmu_reg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
+	rc = poll_gmu_reg(adreno_dev, A6XX_GMU_GMU2HOST_INTR_INFO,
 		HFI_IRQ_MSGQ_MASK, HFI_IRQ_MSGQ_MASK, HFI_RSP_TIMEOUT);
 
 	if (rc) {
@@ -337,7 +337,7 @@ static int a6xx_hfi_send_cmd(struct adreno_device *adreno_dev,
 	}
 
 	/* Clear the interrupt */
-	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
+	gmu_core_regwrite(KGSL_DEVICE(adreno_dev), A6XX_GMU_GMU2HOST_INTR_CLR,
 		HFI_IRQ_MSGQ_MASK);
 
 	rc = a6xx_hfi_process_queue(gmu, HFI_MSG_ID, ret_cmd);
@@ -841,14 +841,11 @@ int a6xx_hfi_send_req(struct adreno_device *adreno_dev, unsigned int id,
 irqreturn_t a6xx_hfi_irq_handler(int irq, void *data)
 {
 	struct kgsl_device *device = data;
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
+	struct a6xx_gmu_device *gmu = to_a6xx_gmu(ADRENO_DEVICE(device));
 	unsigned int status = 0;
 
-	adreno_read_gmureg(adreno_dev,
-			ADRENO_REG_GMU_GMU2HOST_INTR_INFO, &status);
-	adreno_write_gmureg(adreno_dev,
-			ADRENO_REG_GMU_GMU2HOST_INTR_CLR, HFI_IRQ_MASK);
+	gmu_core_regread(device, A6XX_GMU_GMU2HOST_INTR_INFO, &status);
+	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, HFI_IRQ_MASK);
 
 	if (status & HFI_IRQ_DBGQ_MASK)
 		a6xx_hfi_process_queue(gmu, HFI_DBG_ID, NULL);