Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 25ffd4b1 authored by Chris Wilson
Browse files

drm/i915: Markup expected timeline locks for i915_active



As every i915_active_request should be serialised by a dedicated lock,
i915_active consists of a tree of locks; one for each node. Mark up
the i915_active_request with the lock that is supposed to be guarding it so
that we can verify that the serialised updates are indeed serialised.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190816121000.8507-2-chris@chris-wilson.co.uk
parent 6c69a454
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -230,7 +230,7 @@ alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
	if (IS_ERR(rq))
		return rq;

	err = i915_active_ref(&overlay->last_flip, rq->fence.context, rq);
	err = i915_active_ref(&overlay->last_flip, rq->timeline, rq);
	if (err) {
		i915_request_add(rq);
		return ERR_PTR(err);
+1 −1
Original line number Diff line number Diff line
@@ -211,7 +211,7 @@ static void clear_pages_worker(struct work_struct *work)
	 * keep track of the GPU activity within this vma/request, and
	 * propagate the signal from the request to w->dma.
	 */
	err = i915_active_ref(&vma->active, rq->fence.context, rq);
	err = i915_active_ref(&vma->active, rq->timeline, rq);
	if (err)
		goto out_request;

+1 −1
Original line number Diff line number Diff line
@@ -908,7 +908,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
		if (emit)
			err = emit(rq, data);
		if (err == 0)
			err = i915_active_ref(&cb->base, rq->fence.context, rq);
			err = i915_active_ref(&cb->base, rq->timeline, rq);

		i915_request_add(rq);
		if (err)
+3 −8
Original line number Diff line number Diff line
@@ -306,10 +306,10 @@ int intel_context_prepare_remote_request(struct intel_context *ce,

		/* Queue this switch after current activity by this context. */
		err = i915_active_request_set(&tl->last_request, rq);
		mutex_unlock(&tl->mutex);
		if (err)
			goto unlock;
			return err;
	}
	lockdep_assert_held(&tl->mutex);

	/*
	 * Guarantee context image and the timeline remains pinned until the
@@ -319,12 +319,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
	 * words transfer the pinned ce object to tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	err = i915_active_ref(&ce->active, rq->fence.context, rq);

unlock:
	if (rq->timeline != tl)
		mutex_unlock(&tl->mutex);
	return err;
	return i915_active_ref(&ce->active, rq->timeline, rq);
}

struct i915_request *intel_context_create_request(struct intel_context *ce)
+1 −1
Original line number Diff line number Diff line
@@ -18,7 +18,7 @@ static inline int
intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
			      struct i915_request *rq)
{
	return i915_active_ref(&node->active, rq->fence.context, rq);
	return i915_active_ref(&node->active, rq->timeline, rq);
}

static inline void
Loading