Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 97f06158 authored by Chris Wilson's avatar Chris Wilson
Browse files

drm/i915: Pull seqno started checks together



We have a few instances of checking seqno-1 to see if the HW has started
the request. Pull those together under a helper.

v2: Pull the !seqno assertion higher, as given seqno==1 we may indeed
check to see if we have started using seqno==0.

Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180806112605.20725-1-chris@chris-wilson.co.uk
parent cf1f697a
Loading
Loading
Loading
Loading
+4 −5
Original line number Diff line number Diff line
@@ -527,7 +527,7 @@ void __i915_request_submit(struct i915_request *request)

	seqno = timeline_get_seqno(&engine->timeline);
	GEM_BUG_ON(!seqno);
	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
	GEM_BUG_ON(intel_engine_signaled(engine, seqno));

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
@@ -579,8 +579,7 @@ void __i915_request_unsubmit(struct i915_request *request)
	 */
	GEM_BUG_ON(!request->global_seqno);
	GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine),
				     request->global_seqno));
	GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
	engine->timeline.seqno--;

	/* We may be recursing from the signal callback of another i915 fence */
@@ -1205,7 +1204,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
	 * it is a fair assumption that it will not complete within our
	 * relatively short timeout.
	 */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
	if (!intel_engine_has_started(engine, seqno))
		return false;

	/*
@@ -1222,7 +1221,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
	irq = READ_ONCE(engine->breadcrumbs.irq_count);
	timeout_us += local_clock_us(&cpu);
	do {
		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
		if (intel_engine_has_completed(engine, seqno))
			return seqno == i915_request_global_seqno(rq);

		/*
+25 −14
Original line number Diff line number Diff line
@@ -272,7 +272,10 @@ long i915_request_wait(struct i915_request *rq,
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
#define I915_WAIT_FOR_IDLE_BOOST BIT(3)

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
					    u32 seqno);
static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
					      u32 seqno);

/**
 * Returns true if seq1 is later than seq2.
@@ -282,11 +285,31 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
	return (s32)(seq1 - seq2) >= 0;
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * Returns true if the request has been submitted to hardware, and the hardware
 * has advanced past the end of the previous request and so should be either
 * currently processing the request (though it may be preempted and so
 * not necessarily the next request to complete) or have completed the request.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	u32 seqno;

	seqno = i915_request_global_seqno(rq);
	if (!seqno) /* not yet submitted to HW */
		return false;

	return intel_engine_has_started(rq->engine, seqno);
}

static inline bool
__i915_request_completed(const struct i915_request *rq, u32 seqno)
{
	GEM_BUG_ON(!seqno);
	return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) &&
	return intel_engine_has_completed(rq->engine, seqno) &&
		seqno == i915_request_global_seqno(rq);
}

@@ -301,18 +324,6 @@ static inline bool i915_request_completed(const struct i915_request *rq)
	return __i915_request_completed(rq, seqno);
}

static inline bool i915_request_started(const struct i915_request *rq)
{
	u32 seqno;

	seqno = i915_request_global_seqno(rq);
	if (!seqno)
		return false;

	return i915_seqno_passed(intel_engine_get_seqno(rq->engine),
				 seqno - 1);
}

static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
{
	const struct i915_request *rq =
+2 −4
Original line number Diff line number Diff line
@@ -256,8 +256,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
	spin_unlock(&b->irq_lock);

	rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
		GEM_BUG_ON(!i915_seqno_passed(intel_engine_get_seqno(engine),
					      wait->seqno));
		GEM_BUG_ON(!intel_engine_signaled(engine, wait->seqno));
		RB_CLEAR_NODE(&wait->node);
		wake_up_process(wait->tsk);
	}
@@ -508,8 +507,7 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
		return armed;

	/* Make the caller recheck if its request has already started. */
	return i915_seqno_passed(intel_engine_get_seqno(engine),
				 wait->seqno - 1);
	return intel_engine_has_started(engine, wait->seqno);
}

static inline bool chain_wakeup(struct rb_node *rb, int priority)
+1 −2
Original line number Diff line number Diff line
@@ -968,8 +968,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
		return true;

	/* Any inflight/incomplete requests? */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
			       intel_engine_last_submit(engine)))
	if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
		return false;

	if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
+1 −1
Original line number Diff line number Diff line
@@ -142,7 +142,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
		return -1;

	if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
	if (intel_engine_signaled(signaller, seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
Loading