Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 89704d3c authored by Hareesh Gundu's avatar Hareesh Gundu Committed by Gerrit - the friendly Code Review server
Browse files

msm: kgsl: Expire MARKER commands even if inflight is full



If the inflight queue is full and command batches are taking a long time
to complete, MARKER commands may be held in the context queue for a long
duration, even though the marker command and its dependent timestamp
have already been retired.

CRs-Fixed: 828303
Change-Id: I9f9cf88eb34d128ea0a0ded057c3a6d0fa86d66d
Signed-off-by: default avatarHareesh Gundu <hareeshg@codeaurora.org>
parent 66e9dfa9
Loading
Loading
Loading
Loading
+65 −20
Original line number Diff line number Diff line
@@ -329,10 +329,15 @@ static inline void _pop_cmdbatch(struct adreno_context *drawctxt)
		ADRENO_CONTEXT_CMDQUEUE_SIZE);
	drawctxt->queued--;
}

static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt)
/**
 * Removes all expired marker and sync cmdbatches from
 * the context queue when the marker command and its dependent
 * timestamp are retired. This function is recursive.
 * Returns the cmdbatch at the head of the queue if the context
 * still has commands, NULL otherwise.
 */
static struct kgsl_cmdbatch *_expire_markers(struct adreno_context *drawctxt)
{
	struct kgsl_cmdbatch *cmdbatch = NULL;
	struct kgsl_cmdbatch *cmdbatch;
	bool pending = false;

	if (drawctxt->cmdqueue_head == drawctxt->cmdqueue_tail)
@@ -340,16 +345,55 @@ static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt)

	cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];

	if (cmdbatch == NULL)
		return NULL;

	/* Check to see if this is a marker we can skip over */
	if (cmdbatch->flags & KGSL_CMDBATCH_MARKER) {
		if (_marker_expired(cmdbatch)) {
	if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
			_marker_expired(cmdbatch)) {
		_pop_cmdbatch(drawctxt);
		_retire_marker(cmdbatch);
		return _expire_markers(drawctxt);
	}

	if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
		/*
		 * We may have cmdbatch timer running, which also uses same
		 * lock, take a lock with software interrupt disabled (bh) to
		 * avoid spin lock recursion.
		 */
		spin_lock_bh(&cmdbatch->lock);
		if (!list_empty(&cmdbatch->synclist))
			pending = true;
		spin_unlock_bh(&cmdbatch->lock);

		if (!pending) {
			_pop_cmdbatch(drawctxt);
			kgsl_cmdbatch_destroy(cmdbatch);
			return _expire_markers(drawctxt);
		}
	}

	return cmdbatch;
}

			/* Get the next thing in the queue */
			return _get_cmdbatch(drawctxt);
/*
 * expire_markers() - Drop retired marker/sync cmdbatches from a context.
 * @drawctxt: the adreno context whose queue should be pruned
 *
 * Locked wrapper around _expire_markers(): takes the context spinlock,
 * lets _expire_markers() pop any expired marker and completed sync
 * cmdbatches off the head of the context queue, then releases the lock.
 * The returned head cmdbatch from _expire_markers() is intentionally
 * ignored here; callers only want the pruning side effect.
 */
static void expire_markers(struct adreno_context *drawctxt)
{
	/* Serialize with other users of the context queue */
	spin_lock(&drawctxt->lock);
	_expire_markers(drawctxt);
	spin_unlock(&drawctxt->lock);
}

static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt)
{
	struct kgsl_cmdbatch *cmdbatch;
	bool pending = false;

	cmdbatch = _expire_markers(drawctxt);

	if (cmdbatch == NULL)
		return NULL;

	/*
	 * If the marker isn't expired but the SKIP bit is set
	 * then there are real commands following this one in
@@ -358,10 +402,9 @@ static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt)
	 * correct.  If skip isn't set then we block this queue
	 * until the dependent timestamp expires
	 */

		if (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv))
	if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
			(!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)))
		pending = true;
	}

	/*
	 * We may have cmdbatch timer running, which also uses same lock,
@@ -636,8 +679,10 @@ static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
	int inflight = _cmdqueue_inflight(dispatch_q);
	unsigned int timestamp;

	if (dispatch_q->inflight >= inflight)
	if (dispatch_q->inflight >= inflight) {
		expire_markers(drawctxt);
		return -EBUSY;
	}

	/*
	 * Each context can send a specific number of command batches per cycle