Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ffbd9932 authored by Sushmita Susheelendra, committed by Jordan Crouse
Browse files

msm: kgsl: Fix stall on pagefault sequence



As we do not clear FSR in the GPU fault handler,
we have to disable context fault interrupts until
the FSR is cleared. Not doing so results in the
interrupt firing multiple times for the same pagefault.
Fix the CP CACHE_FLUSH_TS interrupt storm by inserting
a WAIT_MEM_WRITES packet just before the EVENT_WRITE -
CACHE_FLUSH_TS. This ensures that all preceding memory
writes have completed.
Also, set the default pagetable for the context; otherwise,
some functions will return early without doing anything.

Change-Id: Ifdfe10de7b52053560209692b3340a3b300281a2
Signed-off-by: Sushmita Susheelendra <ssusheel@codeaurora.org>
parent 4471b4c1
Loading
Loading
Loading
Loading
+16 −12
Original line number Diff line number Diff line
@@ -1660,17 +1660,15 @@ static inline bool adreno_try_soft_reset(struct kgsl_device *device, int fault)
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/*
	 * Try soft reset for non mmu fault case only and if VBIF
	 * pipe clears cleanly.
	 * Skip soft reset and use hard reset for A304 GPU, As
	 * A304 is not able to do SMMU programming after soft reset.
	 * Do not do soft reset for a IOMMU fault (because the IOMMU hardware
	 * needs a reset too) or for the A304 because it can't do SMMU
	 * programming of any kind after a soft reset
	 */
	if (!(fault & ADRENO_IOMMU_PAGE_FAULT) &&
			!adreno_is_a304(adreno_dev) &&
			!adreno_vbif_clear_pending_transactions(device))
		return true;

	if ((fault & ADRENO_IOMMU_PAGE_FAULT) || adreno_is_a304(adreno_dev))
		return false;

	return true;
}

/**
@@ -1690,9 +1688,15 @@ int adreno_reset(struct kgsl_device *device, int fault)

	/* Try soft reset first */
	if (adreno_try_soft_reset(device, fault)) {
		/* Make sure VBIF is cleared before resetting */
		ret = adreno_vbif_clear_pending_transactions(device);

		if (ret == 0) {
			ret = adreno_soft_reset(device);
			if (ret)
			KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
				KGSL_DEV_ERR_ONCE(device,
					"Device soft reset failed\n");
		}
	}
	if (ret) {
		/* If soft reset failed/skipped, then pull the power */
+16 −0
Original line number Diff line number Diff line
@@ -606,6 +606,13 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
	if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP)
		total_sizedwords += 9;

	/* WAIT_MEM_WRITES - needed in the stall on fault case
	 * to prevent out of order CP operations that can result
	 * in a CACHE_FLUSH_TS interrupt storm */
	if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
				&adreno_dev->ft_pf_policy))
		total_sizedwords += 1;

	ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
	if (IS_ERR(ringcmds))
		return PTR_ERR(ringcmds);
@@ -697,6 +704,15 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
	if (profile_ready)
		adreno_profile_postib_processing(adreno_dev, &flags, &ringcmds);

	/*
	 * WAIT_MEM_WRITES - needed in the stall on fault case to prevent
	 * out of order CP operations that can result in a CACHE_FLUSH_TS
	 * interrupt storm
	 */
	if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
				&adreno_dev->ft_pf_policy))
		*ringcmds++ = cp_packet(adreno_dev, CP_WAIT_MEM_WRITES, 0);

	/*
	 * end-of-pipeline timestamp.  If per context timestamps is not
	 * enabled, then drawctxt will be NULL or internal command flag will be
+29 −23
Original line number Diff line number Diff line
@@ -387,7 +387,11 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
		(flags & IOMMU_FAULT_TRANSACTION_STALLED)) {
		uint32_t sctlr_val;
		ret = -EBUSY;
		/* Disable context fault interrupts */
		/*
		 * Disable context fault interrupts
		 * as we do not clear FSR in the ISR.
		 * Will be re-enabled after FSR is cleared.
		 */
		sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
		sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_CFIE_SHIFT);
		KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
@@ -972,7 +976,7 @@ static void _detach_context(struct kgsl_iommu_context *ctx, struct kgsl_device

static int _setup_user_context(struct kgsl_mmu *mmu)
{
	int ret;
	int ret = 0;
	struct kgsl_iommu *iommu = mmu->priv;
	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
	struct adreno_device *adreno_dev = ADRENO_DEVICE(mmu->device);
@@ -987,8 +991,6 @@ static int _setup_user_context(struct kgsl_mmu *mmu)
			ret = PTR_ERR(mmu->defaultpagetable);
			mmu->defaultpagetable = NULL;
			return ret;
		} else if (mmu->defaultpagetable == NULL) {
			return -ENOMEM;
		}
	}

@@ -998,12 +1000,14 @@ static int _setup_user_context(struct kgsl_mmu *mmu)
	if (ret)
		return ret;

	ctx->default_pt = mmu->defaultpagetable;

	kgsl_iommu_enable_clk(mmu);

	sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);

	/*
	 * For IOMMU V1, if pagefault policy is GPUHALT_ENABLE,
	 * If pagefault policy is GPUHALT_ENABLE,
	 * 1) Program CFCFG to 1 to enable STALL mode
	 * 2) Program HUPCF to 0 (Stall or terminate subsequent
	 *    transactions in the presence of an outstanding fault)
@@ -1025,9 +1029,7 @@ static int _setup_user_context(struct kgsl_mmu *mmu)
	KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
	kgsl_iommu_disable_clk(mmu);

	if (ret)
		_detach_context(ctx, mmu->device);
	return ret;
	return 0;
}

static int _setup_secure_context(struct kgsl_mmu *mmu)
@@ -1278,10 +1280,24 @@ static void kgsl_iommu_clear_fsr(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu *iommu = mmu->priv;
	struct kgsl_iommu_context  *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
	unsigned int sctlr_val;

	if (ctx->default_pt != NULL) {
		kgsl_iommu_enable_clk(mmu);
		KGSL_IOMMU_SET_CTX_REG(ctx, FSR, 0xffffffff);
		/*
		 * Re-enable context fault interrupts after clearing
		 * FSR to prevent the interrupt from firing repeatedly
		 */
		sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
		sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_CFIE_SHIFT);
		KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
		/*
		 * Make sure the above register writes
		 * are not reordered across the barrier
		 * as we use writel_relaxed to write them
		 */
		wmb();
		kgsl_iommu_disable_clk(mmu);
	}
}
@@ -1290,19 +1306,13 @@ static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu *iommu = mmu->priv;
	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
	unsigned int sctlr_val;

	if (ctx->default_pt != NULL && ctx->fault) {
		/*
		 * Write 1 to RESUME.TnR to terminate the
		 * stalled transaction. Also, re-enable
		 * context fault interrupts by writing 1
		 * to SCTLR.CFIE
		 * stalled transaction.
		 */
		sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
		sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_CFIE_SHIFT);
		KGSL_IOMMU_SET_CTX_REG(ctx, RESUME, 1);
		KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
		/*
		 * Make sure the above register writes
		 * are not reordered across the barrier
@@ -1336,16 +1346,12 @@ static int kgsl_iommu_close(struct kgsl_mmu *mmu)
	for (i = 0; i < KGSL_IOMMU_CONTEXT_MAX; i++)
		_detach_context(&iommu->ctx[i], mmu->device);

	if (mmu->defaultpagetable != NULL) {
	kgsl_mmu_putpagetable(mmu->defaultpagetable);
	mmu->defaultpagetable = NULL;
	}


	if (mmu->securepagetable != NULL) {
	kgsl_mmu_putpagetable(mmu->securepagetable);
	mmu->securepagetable = NULL;
	}

	if (iommu->regbase != NULL)
		iounmap(iommu->regbase);