Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b817ad29 authored by Pankaj Gupta's avatar Pankaj Gupta
Browse files

msm: kgsl: Update the sequence to resume stalled SMMU on no GMU devices



Recently, the sequence to resume a stalled SMMU was updated to use GBIF GX
and CX halts so that the stalled SMMU resumes properly, but this was only
handled for GMU devices. This patch updates the sequence for no-GMU devices
as well to ensure that this debug feature works properly.

Change-Id: Idbdc5c8782ef5a63d05c879996792ce42c1bcf0b
Signed-off-by: default avatarDeepak Kumar <dkumar@codeaurora.org>
Signed-off-by: default avatarPankaj Gupta <gpankaj@codeaurora.org>
parent 24bf2ae1
Loading
Loading
Loading
Loading
+48 −0
Original line number Diff line number Diff line
@@ -1243,6 +1243,20 @@ static int a6xx_reset(struct kgsl_device *device)
	int ret;
	unsigned long flags = device->pwrctrl.ctrl_flags;

	/*
	 * Stall on fault needs GBIF halt sequences for robust recovery.
	 * Because in a worst-case scenario, if any of the GPU blocks is
	 * generating a stream of un-ending faulting transactions, SMMU will
	 * process those transactions when we try to resume it and enter
	 * stall-on-fault mode again. GBIF halt sequences make sure that all
	 * GPU transactions are halted at GBIF which ensures that SMMU
	 * can resume safely.
	 */
	a6xx_do_gbif_halt(adreno_dev, A6XX_RBBM_GBIF_HALT,
		A6XX_RBBM_GBIF_HALT_ACK, A6XX_GBIF_GX_HALT_MASK, "GX");
	a6xx_do_gbif_halt(adreno_dev, A6XX_GBIF_HALT, A6XX_GBIF_HALT_ACK,
		A6XX_GBIF_ARB_HALT_MASK, "CX");

	/* Clear ctrl_flags to ensure clocks and regulators are turned off */
	device->pwrctrl.ctrl_flags = 0;

@@ -2412,6 +2426,40 @@ u64 a6xx_read_alwayson(struct adreno_device *adreno_dev)
	return (((u64) hi) << 32) | lo;
}

/*
 * Halt GBIF traffic for the given client and wait for the hardware to
 * acknowledge the halt. While polling, keep resuming the SMMU: during
 * stall-on-fault recovery the SMMU holds GPU transactions, and the halt
 * can never be acknowledged until those transactions are allowed to drain.
 * Logs an error if no ack arrives within the timeout.
 */
void a6xx_do_gbif_halt(struct adreno_device *adreno_dev,
	u32 halt_reg, u32 ack_reg, u32 mask, const char *client)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned long timeout;
	u32 ack;

	/* Request the halt for the bits selected by @mask */
	kgsl_regwrite(device, halt_reg, mask);

	/* Poll for the ack for up to 100ms */
	timeout = jiffies + msecs_to_jiffies(100);
	do {
		kgsl_regread(device, ack_reg, &ack);
		if ((ack & mask) == mask)
			return;

		/*
		 * A stalled SMMU blocks outstanding GPU transactions, so
		 * the halt sequence cannot complete while a stall-on-fault
		 * is pending. Resume the SMMU on every iteration to let
		 * the pending transactions drain.
		 */
		kgsl_mmu_pagefault_resume(&device->mmu);
		usleep_range(10, 100);
	} while (!time_after(jiffies, timeout));

	/* Resume the SMMU once more and take a final reading */
	kgsl_mmu_pagefault_resume(&device->mmu);

	kgsl_regread(device, ack_reg, &ack);
	if ((ack & mask) != mask)
		dev_err(device->dev, "%s GBIF Halt ack timed out\n", client);
}

struct adreno_gpudev adreno_a6xx_gpudev = {
	.reg_offsets = a6xx_register_offsets,
	.probe = a6xx_probe,
+11 −0
Original line number Diff line number Diff line
@@ -317,6 +317,17 @@ int a6xx_gmu_sptprac_enable(struct adreno_device *adreno_dev);
void a6xx_gmu_sptprac_disable(struct adreno_device *adreno_dev);
bool a6xx_gmu_sptprac_is_on(struct adreno_device *adreno_dev);

/**
 * a6xx_do_gbif_halt - Halt GBIF traffic for a client and wait for the ack
 * @adreno_dev: An Adreno GPU handle
 * @halt_reg: Register written to trigger the GBIF halt
 * @ack_reg: Status register polled for the halt acknowledgment
 * @mask: Mask of the ack bits to wait for in @ack_reg
 * @client: Client name used in the timeout error message - "GX" or "CX"
 *
 * Resumes a stalled SMMU while polling so the halt can complete during
 * stall-on-fault recovery. Logs an error if the ack does not arrive
 * before the timeout expires.
 */
void a6xx_do_gbif_halt(struct adreno_device *adreno_dev,
	u32 halt_reg, u32 ack_reg, u32 mask, const char *client);

/**
 * a6xx_read_alwayson - Read the current always on clock value
 * @adreno_dev: An Adreno GPU handle
+2 −36
Original line number Diff line number Diff line
@@ -1616,40 +1616,6 @@ static int a6xx_gmu_init(struct adreno_device *adreno_dev)

#define A6XX_VBIF_XIN_HALT_CTRL1_ACKS   (BIT(0) | BIT(1) | BIT(2) | BIT(3))

/*
 * Trigger a GBIF halt for the given client and poll until the hardware
 * acknowledges it. The SMMU is resumed on every poll iteration because a
 * stall-on-fault would otherwise hold GPU transactions and keep the halt
 * from ever being acknowledged. Logs an error on timeout.
 */
static void do_gbif_halt(struct kgsl_device *device, u32 reg, u32 ack_reg,
	u32 mask, const char *client)
{
	unsigned long expires = jiffies + msecs_to_jiffies(100);
	u32 status;

	/* Request a halt of the transactions selected by @mask */
	kgsl_regwrite(device, reg, mask);

	do {
		kgsl_regread(device, ack_reg, &status);
		if ((status & mask) == mask)
			return;

		/*
		 * When recovering from a stall-on-fault the SMMU holds
		 * the outstanding GPU transactions, so the halt sequence
		 * cannot complete until the SMMU is resumed.
		 */
		kgsl_mmu_pagefault_resume(&device->mmu);

		usleep_range(10, 100);
	} while (!time_after(jiffies, expires));

	/* Resume the SMMU one last time and take a final reading */
	kgsl_mmu_pagefault_resume(&device->mmu);

	kgsl_regread(device, ack_reg, &status);
	if ((status & mask) != mask)
		dev_err(device->dev, "%s GBIF halt timed out\n", client);
}

static void a6xx_gmu_pwrctrl_suspend(struct adreno_device *adreno_dev)
{
	int ret = 0;
@@ -1676,13 +1642,13 @@ static void a6xx_gmu_pwrctrl_suspend(struct adreno_device *adreno_dev)
	if (adreno_has_gbif(adreno_dev)) {
		/* Halt GX traffic */
		if (a6xx_gmu_gx_is_on(device))
			do_gbif_halt(device, A6XX_RBBM_GBIF_HALT,
			a6xx_do_gbif_halt(adreno_dev, A6XX_RBBM_GBIF_HALT,
				A6XX_RBBM_GBIF_HALT_ACK,
				A6XX_GBIF_GX_HALT_MASK,
				"GX");

		/* Halt CX traffic */
		do_gbif_halt(device, A6XX_GBIF_HALT, A6XX_GBIF_HALT_ACK,
		a6xx_do_gbif_halt(adreno_dev, A6XX_GBIF_HALT, A6XX_GBIF_HALT_ACK,
			A6XX_GBIF_ARB_HALT_MASK, "CX");
	}