Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit afae1ca0 authored by Deepak Kumar
Browse files

msm: kgsl: Update the sequence to resume stalled SMMU on no GMU devices



Recently, the sequence used to resume a stalled SMMU was updated to use GBIF GX
and CX halts so that the stalled SMMU is resumed properly, but this was only
handled for devices with a GMU. This patch applies the same sequence to
devices without a GMU as well, ensuring that this debug feature works properly.

Change-Id: Idbdc5c8782ef5a63d05c879996792ce42c1bcf0b
Signed-off-by: Deepak Kumar <dkumar@codeaurora.org>
parent 9a711d9c
Loading
Loading
Loading
Loading
+54 −0
Original line number Diff line number Diff line
@@ -1655,6 +1655,60 @@ static void adreno_fault_detect_init(struct adreno_device *adreno_dev)
	adreno_fault_detect_start(adreno_dev);
}

/*
 * Request a GBIF halt for @client and poll @ack_reg until all bits in
 * @mask acknowledge the halt or VBIF_RESET_ACK_TIMEOUT ms elapse.
 * The SMMU is resumed on every poll because a stalled SMMU would
 * otherwise prevent the halt handshake from ever completing.
 */
static void do_gbif_halt(struct adreno_device *adreno_dev,
	u32 halt_reg, u32 ack_reg, u32 mask, const char *client)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned long deadline;
	u32 ack;

	/* Request the halt on the given GBIF interface */
	adreno_writereg(adreno_dev, halt_reg, mask);

	deadline = jiffies + msecs_to_jiffies(VBIF_RESET_ACK_TIMEOUT);
	do {
		adreno_readreg(adreno_dev, ack_reg, &ack);
		if ((ack & mask) == mask)
			return;

		/*
		 * If we are attempting GBIF halt in case of stall-on-fault
		 * then the halt sequence will not complete as long as SMMU
		 * is stalled, so resume it on every poll.
		 */
		kgsl_mmu_pagefault_resume(&device->mmu);
		usleep_range(10, 100);
	} while (time_before_eq(jiffies, deadline));

	/* Resume and sample the ack one last time before giving up */
	kgsl_mmu_pagefault_resume(&device->mmu);

	adreno_readreg(adreno_dev, ack_reg, &ack);
	if ((ack & mask) != mask)
		dev_err(device->dev, "%s GBIF Halt ack timed out\n", client);
}

/**
 * adreno_smmu_resume - Clears stalled/pending transactions in GBIF pipe
 * and resumes stalled SMMU
 * @adreno_dev: Pointer to the adreno device
 */
void adreno_smmu_resume(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	/* GX-side GBIF traffic can only be halted while GX is powered on */
	if (gmu_core_dev_gx_is_on(device))
		do_gbif_halt(adreno_dev, ADRENO_REG_RBBM_GBIF_HALT,
			ADRENO_REG_RBBM_GBIF_HALT_ACK,
			gpudev->gbif_gx_halt_mask, "GX");

	/* Then stop all remaining traffic at the CX arbiter */
	do_gbif_halt(adreno_dev, ADRENO_REG_GBIF_HALT,
		ADRENO_REG_GBIF_HALT_ACK, gpudev->gbif_arb_halt_mask, "CX");
}

/**
 * adreno_clear_pending_transactions() - Clear transactions in GBIF/VBIF pipe
 * @device: Pointer to the device whose GBIF/VBIF pipe is to be cleared
+1 −0
Original line number Diff line number Diff line
@@ -1820,4 +1820,5 @@ int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
	unsigned int fence_mask);
int adreno_clear_pending_transactions(struct kgsl_device *device);
void adreno_gmu_send_nmi(struct adreno_device *adreno_dev);
void adreno_smmu_resume(struct adreno_device *adreno_dev);
#endif /*__ADRENO_H */
+2 −49
Original line number Diff line number Diff line
@@ -1171,40 +1171,6 @@ static int a6xx_gmu_load_firmware(struct kgsl_device *device)

#define A6XX_VBIF_XIN_HALT_CTRL1_ACKS   (BIT(0) | BIT(1) | BIT(2) | BIT(3))

/*
 * Write @mask to the halt register @reg and poll @ack_reg for up to
 * 100 ms until every bit in @mask is acknowledged. The SMMU is resumed
 * on each poll since a stall-on-fault would block the handshake.
 */
static void do_gbif_halt(struct kgsl_device *device, u32 reg, u32 ack_reg,
	u32 mask, const char *client)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(100);
	u32 status;

	/* Request the halt for this GBIF client */
	kgsl_regwrite(device, reg, mask);

	do {
		kgsl_regread(device, ack_reg, &status);
		if ((status & mask) == mask)
			return;

		/*
		 * If we are attempting recovery in case of stall-on-fault
		 * then the halt sequence will not complete as long as SMMU
		 * is stalled, so keep resuming it while we wait.
		 */
		kgsl_mmu_pagefault_resume(&device->mmu);

		usleep_range(10, 100);
	} while (time_before_eq(jiffies, deadline));

	/* Give the ack one final chance before reporting failure */
	kgsl_mmu_pagefault_resume(&device->mmu);

	kgsl_regread(device, ack_reg, &status);
	if ((status & mask) != mask)
		dev_err(device->dev, "%s GBIF halt timed out\n", client);
}

static int a6xx_gmu_suspend(struct kgsl_device *device)
{
	int ret = 0;
@@ -1228,21 +1194,8 @@ static int a6xx_gmu_suspend(struct kgsl_device *device)

	gmu_core_regwrite(device, A6XX_GMU_CM3_SYSRESET, 1);

	if (adreno_has_gbif(adreno_dev)) {
		struct adreno_gpudev *gpudev =
			ADRENO_GPU_DEVICE(adreno_dev);

		/* Halt GX traffic */
		if (a6xx_gmu_gx_is_on(device))
			do_gbif_halt(device, A6XX_RBBM_GBIF_HALT,
				A6XX_RBBM_GBIF_HALT_ACK,
				gpudev->gbif_gx_halt_mask,
				"GX");

		/* Halt CX traffic */
		do_gbif_halt(device, A6XX_GBIF_HALT, A6XX_GBIF_HALT_ACK,
			gpudev->gbif_arb_halt_mask, "CX");
	}
	if (adreno_has_gbif(adreno_dev))
		adreno_smmu_resume(adreno_dev);

	if (a6xx_gmu_gx_is_on(device))
		kgsl_regwrite(device, A6XX_RBBM_SW_RESET_CMD, 0x1);
+26 −12
Original line number Diff line number Diff line
@@ -2092,7 +2092,7 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
	int ret, i;
	int fault;
	int halt;
	bool gx_on;
	bool gx_on, smmu_stalled = false;

	fault = atomic_xchg(&dispatcher->fault, 0);
	if (fault == 0)
@@ -2133,19 +2133,21 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
	 * proceed if the fault handler has already run in the IRQ thread,
	 * else return early to give the fault handler a chance to run.
	 */
	if (!(fault & ADRENO_IOMMU_PAGE_FAULT) &&
		(adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev)) &&
		gx_on) {
	if (gx_on) {
		unsigned int val;

		adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS3, &val);
		if (val & BIT(24)) {
		if (val & BIT(24))
			smmu_stalled = true;
	}

	if (!(fault & ADRENO_IOMMU_PAGE_FAULT) &&
		(adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev)) &&
		smmu_stalled) {
		mutex_unlock(&device->mutex);
			dev_err(device->dev,
				"SMMU is stalled without a pagefault\n");
		dev_err(device->dev, "SMMU is stalled without a pagefault\n");
		return -EBUSY;
	}
	}

	/* Turn off all the timers */
	del_timer_sync(&dispatcher->timer);
@@ -2211,8 +2213,20 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
		gpudev->gpu_keepalive(adreno_dev, false);

	/* Terminate the stalled transaction and resume the IOMMU */
	if (fault & ADRENO_IOMMU_PAGE_FAULT)
	if (fault & ADRENO_IOMMU_PAGE_FAULT) {
		/*
		 * Trigger this only when GBIF is supported, the GMU is not
		 * enabled, and the SMMU is stalled: the sequence is only
		 * valid for GPUs that have GBIF; when the GMU is enabled it
		 * is handled during GMU suspend instead; and it is needed
		 * only while the SMMU is actually stalled.
		 */
		if (adreno_has_gbif(adreno_dev) &&
			!gmu_core_isenabled(device) && smmu_stalled)
			adreno_smmu_resume(adreno_dev);
		else
			kgsl_mmu_pagefault_resume(&device->mmu);
	}

	/* Reset the dispatcher queue */
	dispatcher->inflight = 0;