
Commit fd6a4f9f authored by qctecmdr, committed by Gerrit - the friendly Code Review server

Merge "msm: kgsl: Update the sequence to resume stalled SMMU on no GMU devices"

parents 3f07f340 afae1ca0
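
For context, the sequence this change centralizes works like this: write a halt mask to the GBIF halt register, then poll the ack register until every requested bit is set, resuming the SMMU on each iteration, because while the SMMU is stalled on a fault the halt can never be acknowledged. Below is a minimal, self-contained C simulation of that loop; every function and variable in it is an illustrative stand-in, not a kgsl or kernel API.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-ins for MMIO and SMMU operations; not the real kgsl API. */
static unsigned int fake_ack;          /* pretend halt-ack register */
static bool smmu_stalled = true;       /* pretend stall-on-fault state */

static void write_halt(unsigned int mask) { (void)mask; }

static unsigned int read_ack(void)
{
	/* The halt only completes once the SMMU is no longer stalled. */
	return smmu_stalled ? 0 : fake_ack;
}

static void resume_smmu(void)
{
	/* Terminating the stalled transaction unblocks the halt. */
	smmu_stalled = false;
	fake_ack = 0xf;
}

static bool halt_and_poll(unsigned int mask, int timeout_ms)
{
	struct timespec start, now;

	write_halt(mask);
	clock_gettime(CLOCK_MONOTONIC, &start);

	do {
		if ((read_ack() & mask) == mask)
			return true;
		/* Resume every iteration: the ack never arrives while stalled. */
		resume_smmu();
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000 +
		 (now.tv_nsec - start.tv_nsec) / 1000000 < timeout_ms);

	/* Check one last time, mirroring the kernel helper below. */
	resume_smmu();
	return (read_ack() & mask) == mask;
}

int main(void)
{
	printf("halt %s\n", halt_and_poll(0xf, 100) ? "acked" : "timed out");
	return 0;
}

Compiled as an ordinary userspace program this prints "halt acked"; in the driver the same shape appears in do_gbif_halt, using jiffies-based timekeeping and usleep_range between polls.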
+54 −0
@@ -1655,6 +1655,60 @@ static void adreno_fault_detect_init(struct adreno_device *adreno_dev)
 	adreno_fault_detect_start(adreno_dev);
 }
 
+static void do_gbif_halt(struct adreno_device *adreno_dev,
+	u32 halt_reg, u32 ack_reg, u32 mask, const char *client)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	unsigned long t;
+	u32 val;
+
+	adreno_writereg(adreno_dev, halt_reg, mask);
+
+	t = jiffies + msecs_to_jiffies(VBIF_RESET_ACK_TIMEOUT);
+	do {
+		adreno_readreg(adreno_dev, ack_reg, &val);
+		if ((val & mask) == mask)
+			return;
+
+		/*
+		 * If we are attempting GBIF halt in case of stall-on-fault
+		 * then the halt sequence will not complete as long as SMMU
+		 * is stalled.
+		 */
+		kgsl_mmu_pagefault_resume(&device->mmu);
+		usleep_range(10, 100);
+	} while (!time_after(jiffies, t));
+
+	/* Check one last time */
+	kgsl_mmu_pagefault_resume(&device->mmu);
+
+	adreno_readreg(adreno_dev, ack_reg, &val);
+	if ((val & mask) == mask)
+		return;
+
+	dev_err(device->dev, "%s GBIF Halt ack timed out\n", client);
+}
+
+/**
+ * adreno_smmu_resume - Clears stalled/pending transactions in GBIF pipe
+ * and resumes stalled SMMU
+ * @adreno_dev: Pointer to the adreno device
+ */
+void adreno_smmu_resume(struct adreno_device *adreno_dev)
+{
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+
+	/* Halt GBIF GX traffic */
+	if (gmu_core_dev_gx_is_on(KGSL_DEVICE(adreno_dev)))
+		do_gbif_halt(adreno_dev, ADRENO_REG_RBBM_GBIF_HALT,
+			ADRENO_REG_RBBM_GBIF_HALT_ACK,
+			gpudev->gbif_gx_halt_mask, "GX");
+
+	/* Halt all CX traffic */
+	do_gbif_halt(adreno_dev, ADRENO_REG_GBIF_HALT,
+		ADRENO_REG_GBIF_HALT_ACK, gpudev->gbif_arb_halt_mask, "CX");
+}
+
 /**
  * adreno_clear_pending_transactions() - Clear transactions in GBIF/VBIF pipe
  * @device: Pointer to the device whose GBIF/VBIF pipe is to be cleared
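
One detail worth calling out in the hunk above: adreno_smmu_resume halts GX-side GBIF traffic first, and only when the GX rail is actually powered (per the gmu_core_dev_gx_is_on check, since GX-domain registers are presumably not accessible otherwise), then halts CX-side arbitration unconditionally. A hedged sketch of that caller contract follows; the names are illustrative, not kgsl symbols.

/* Hypothetical model of the halt ordering; not the kgsl API. */
struct gpu {
	bool gx_rail_on;
};

static void halt_gx(struct gpu *g) { (void)g; /* GX-domain MMIO, needs rail on */ }
static void halt_cx(struct gpu *g) { (void)g; /* CX-domain MMIO, always safe */ }

static void smmu_resume_sequence(struct gpu *g)
{
	if (g->gx_rail_on)	/* skip GX registers when the rail is off */
		halt_gx(g);
	halt_cx(g);		/* CX arbiter halt runs unconditionally */
}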
+1 −0
@@ -1820,4 +1820,5 @@ int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
 	unsigned int fence_mask);
 int adreno_clear_pending_transactions(struct kgsl_device *device);
 void adreno_gmu_send_nmi(struct adreno_device *adreno_dev);
+void adreno_smmu_resume(struct adreno_device *adreno_dev);
 #endif /*__ADRENO_H */
+2 −49
@@ -1171,40 +1171,6 @@ static int a6xx_gmu_load_firmware(struct kgsl_device *device)
 
 #define A6XX_VBIF_XIN_HALT_CTRL1_ACKS   (BIT(0) | BIT(1) | BIT(2) | BIT(3))
 
-static void do_gbif_halt(struct kgsl_device *device, u32 reg, u32 ack_reg,
-	u32 mask, const char *client)
-{
-	u32 ack;
-	unsigned long t;
-
-	kgsl_regwrite(device, reg, mask);
-
-	t = jiffies + msecs_to_jiffies(100);
-	do {
-		kgsl_regread(device, ack_reg, &ack);
-		if ((ack & mask) == mask)
-			return;
-
-		/*
-		 * If we are attempting recovery in case of stall-on-fault
-		 * then the halt sequence will not complete as long as SMMU
-		 * is stalled.
-		 */
-		kgsl_mmu_pagefault_resume(&device->mmu);
-
-		usleep_range(10, 100);
-	} while (!time_after(jiffies, t));
-
-	/* Check one last time */
-	kgsl_mmu_pagefault_resume(&device->mmu);
-
-	kgsl_regread(device, ack_reg, &ack);
-	if ((ack & mask) == mask)
-		return;
-
-	dev_err(device->dev, "%s GBIF halt timed out\n", client);
-}
-
 static int a6xx_gmu_suspend(struct kgsl_device *device)
 {
 	int ret = 0;
@@ -1228,21 +1194,8 @@ static int a6xx_gmu_suspend(struct kgsl_device *device)
 
 	gmu_core_regwrite(device, A6XX_GMU_CM3_SYSRESET, 1);
 
-	if (adreno_has_gbif(adreno_dev)) {
-		struct adreno_gpudev *gpudev =
-			ADRENO_GPU_DEVICE(adreno_dev);
-
-		/* Halt GX traffic */
-		if (a6xx_gmu_gx_is_on(device))
-			do_gbif_halt(device, A6XX_RBBM_GBIF_HALT,
-				A6XX_RBBM_GBIF_HALT_ACK,
-				gpudev->gbif_gx_halt_mask,
-				"GX");
-
-		/* Halt CX traffic */
-		do_gbif_halt(device, A6XX_GBIF_HALT, A6XX_GBIF_HALT_ACK,
-			gpudev->gbif_arb_halt_mask, "CX");
-	}
+	if (adreno_has_gbif(adreno_dev))
+		adreno_smmu_resume(adreno_dev);
 
 	if (a6xx_gmu_gx_is_on(device))
 		kgsl_regwrite(device, A6XX_RBBM_SW_RESET_CMD, 0x1);
+26 −12
@@ -2092,7 +2092,7 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
 	int ret, i;
 	int fault;
 	int halt;
-	bool gx_on;
+	bool gx_on, smmu_stalled = false;
 
 	fault = atomic_xchg(&dispatcher->fault, 0);
 	if (fault == 0)
@@ -2133,19 +2133,21 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
 	 * proceed if the fault handler has already run in the IRQ thread,
 	 * else return early to give the fault handler a chance to run.
 	 */
-	if (!(fault & ADRENO_IOMMU_PAGE_FAULT) &&
-		(adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev)) &&
-		gx_on) {
+	if (gx_on) {
 		unsigned int val;
 
 		adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS3, &val);
-		if (val & BIT(24)) {
-			mutex_unlock(&device->mutex);
-			dev_err(device->dev,
-				"SMMU is stalled without a pagefault\n");
-			return -EBUSY;
-		}
+		if (val & BIT(24))
+			smmu_stalled = true;
+	}
+
+	if (!(fault & ADRENO_IOMMU_PAGE_FAULT) &&
+		(adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev)) &&
+		smmu_stalled) {
+		mutex_unlock(&device->mutex);
+		dev_err(device->dev, "SMMU is stalled without a pagefault\n");
+		return -EBUSY;
 	}
 
 	/* Turn off all the timers */
 	del_timer_sync(&dispatcher->timer);
@@ -2211,8 +2213,20 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
 		gpudev->gpu_keepalive(adreno_dev, false);
 
 	/* Terminate the stalled transaction and resume the IOMMU */
-	if (fault & ADRENO_IOMMU_PAGE_FAULT)
-		kgsl_mmu_pagefault_resume(&device->mmu);
+	if (fault & ADRENO_IOMMU_PAGE_FAULT) {
+		/*
+		 * Trigger this only if GBIF is supported, GMU is not
+		 * enabled and SMMU is stalled: the sequence is valid
+		 * only for GPUs that have GBIF, it is handled in GMU
+		 * suspend when GMU is enabled, and it is needed only
+		 * when SMMU is stalled.
+		 */
+		if (adreno_has_gbif(adreno_dev) &&
+			!gmu_core_isenabled(device) && smmu_stalled)
+			adreno_smmu_resume(adreno_dev);
+		else
+			kgsl_mmu_pagefault_resume(&device->mmu);
+	}
 
 	/* Reset the dispatcher queue */
 	dispatcher->inflight = 0;
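
The routing at the end of dispatcher_do_fault reduces to a small predicate: run the full GBIF halt sequence only on GBIF-capable parts with no GMU while the SMMU is actually stalled; otherwise a plain pagefault resume suffices, since on GMU parts the halt already happens in GMU suspend. A standalone sketch of that decision follows; the enum and function names are hypothetical, chosen for illustration rather than taken from kgsl.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the resume-path choice; not kgsl symbols. */
enum resume_path { PAGEFAULT_RESUME, FULL_GBIF_HALT };

static enum resume_path pick_resume_path(bool has_gbif, bool gmu_enabled,
					 bool smmu_stalled)
{
	/* Full halt sequence only for GBIF parts without a GMU while
	 * the SMMU is stalled; GMU parts handle this in GMU suspend. */
	if (has_gbif && !gmu_enabled && smmu_stalled)
		return FULL_GBIF_HALT;
	return PAGEFAULT_RESUME;
}

int main(void)
{
	printf("%d\n", pick_resume_path(true, false, true));	/* 1: full halt */
	printf("%d\n", pick_resume_path(true, true, true));	/* 0: GMU owns it */
	return 0;
}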