Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6c69a454 authored by Chris Wilson
Browse files

drm/i915/gt: Mark context->active_count as protected by timeline->mutex



We use timeline->mutex to protect modifications to
context->active_count, and the associated enable/disable callbacks.
Due to complications with the engine-pm barrier there is a path where we used
a "superlock" to provide serialised protection and so could not
unconditionally assert with lockdep that it was always held. However,
we can mark the mutex as taken (noting that we may be nested underneath
ourselves) which means we can be reassured the right timeline->mutex is
always treated as held and let lockdep roam free.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190816121000.8507-1-chris@chris-wilson.co.uk
parent f789fbb1
Loading
Loading
Loading
Loading
+3 −0
Original line number Original line Diff line number Diff line
@@ -89,17 +89,20 @@ void intel_context_exit_engine(struct intel_context *ce);


static inline void intel_context_enter(struct intel_context *ce)
static inline void intel_context_enter(struct intel_context *ce)
{
{
	lockdep_assert_held(&ce->timeline->mutex);
	if (!ce->active_count++)
	if (!ce->active_count++)
		ce->ops->enter(ce);
		ce->ops->enter(ce);
}
}


static inline void intel_context_mark_active(struct intel_context *ce)
static inline void intel_context_mark_active(struct intel_context *ce)
{
{
	lockdep_assert_held(&ce->timeline->mutex);
	++ce->active_count;
	++ce->active_count;
}
}


static inline void intel_context_exit(struct intel_context *ce)
static inline void intel_context_exit(struct intel_context *ce)
{
{
	lockdep_assert_held(&ce->timeline->mutex);
	GEM_BUG_ON(!ce->active_count);
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
	if (!--ce->active_count)
		ce->ops->exit(ce);
		ce->ops->exit(ce);
+1 −1
Original line number Original line Diff line number Diff line
@@ -61,7 +61,7 @@ struct intel_context {
	u32 *lrc_reg_state;
	u32 *lrc_reg_state;
	u64 lrc_desc;
	u64 lrc_desc;


	unsigned int active_count; /* notionally protected by timeline->mutex */
	unsigned int active_count; /* protected by timeline->mutex */


	atomic_t pin_count;
	atomic_t pin_count;
	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
+14 −0
Original line number Original line Diff line number Diff line
@@ -37,6 +37,16 @@ static int __engine_unpark(struct intel_wakeref *wf)
	return 0;
	return 0;
}
}


static inline void __timeline_mark_lock(struct intel_context *ce)
{
	mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
}

static inline void __timeline_mark_unlock(struct intel_context *ce)
{
	mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
}

static bool switch_to_kernel_context(struct intel_engine_cs *engine)
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
{
	struct i915_request *rq;
	struct i915_request *rq;
@@ -61,6 +71,8 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
	 * retiring the last request, thus all rings should be empty and
	 * retiring the last request, thus all rings should be empty and
	 * all timelines idle.
	 * all timelines idle.
	 */
	 */
	__timeline_mark_lock(engine->kernel_context);

	rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
	rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
	if (IS_ERR(rq))
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		/* Context switch failed, hope for the best! Maybe reset? */
@@ -80,6 +92,8 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
	__intel_wakeref_defer_park(&engine->wakeref);
	__intel_wakeref_defer_park(&engine->wakeref);
	__i915_request_queue(rq, NULL);
	__i915_request_queue(rq, NULL);


	__timeline_mark_unlock(engine->kernel_context);

	return false;
	return false;
}
}


+4 −0
Original line number Original line Diff line number Diff line
@@ -338,6 +338,8 @@ void intel_timeline_enter(struct intel_timeline *tl)
{
{
	struct intel_gt_timelines *timelines = &tl->gt->timelines;
	struct intel_gt_timelines *timelines = &tl->gt->timelines;


	lockdep_assert_held(&tl->mutex);

	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	if (tl->active_count++)
	if (tl->active_count++)
		return;
		return;
@@ -352,6 +354,8 @@ void intel_timeline_exit(struct intel_timeline *tl)
{
{
	struct intel_gt_timelines *timelines = &tl->gt->timelines;
	struct intel_gt_timelines *timelines = &tl->gt->timelines;


	lockdep_assert_held(&tl->mutex);

	GEM_BUG_ON(!tl->active_count);
	GEM_BUG_ON(!tl->active_count);
	if (--tl->active_count)
	if (--tl->active_count)
		return;
		return;
+2 −1
Original line number Original line Diff line number Diff line
@@ -1087,7 +1087,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
	 * precludes optimising to use semaphores serialisation of a single
	 * precludes optimising to use semaphores serialisation of a single
	 * timeline across engines.
	 * timeline across engines.
	 */
	 */
	prev = rcu_dereference_protected(timeline->last_request.request, 1);
	prev = rcu_dereference_protected(timeline->last_request.request,
					 lockdep_is_held(&timeline->mutex));
	if (prev && !i915_request_completed(prev)) {
	if (prev && !i915_request_completed(prev)) {
		if (is_power_of_2(prev->engine->mask | rq->engine->mask))
		if (is_power_of_2(prev->engine->mask | rq->engine->mask))
			i915_sw_fence_await_sw_fence(&rq->submit,
			i915_sw_fence_await_sw_fence(&rq->submit,