Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9726920b authored by Chris Wilson
Browse files

drm/i915: Only reset the pinned kernel contexts on resume



On resume, we know that the only pinned contexts in danger of seeing
corruption are the kernel context, and so we do not need to walk the
list of all GEM contexts as we tracked them on each engine.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190410190120.830-1-chris@chris-wilson.co.uk
parent feb8846b
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -1995,7 +1995,6 @@ struct drm_i915_private {

	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
	struct {
		void (*resume)(struct drm_i915_private *);
		void (*cleanup_engine)(struct intel_engine_cs *engine);

		struct i915_gt_timelines {
+3 −6
Original line number Diff line number Diff line
@@ -4513,7 +4513,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	i915->gt.resume(i915);
	intel_gt_resume(i915);

	if (i915_gem_init_hw(i915))
		goto err_wedged;
@@ -4853,13 +4853,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)

	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);

	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		dev_priv->gt.resume = intel_lr_context_resume;
	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
	} else {
		dev_priv->gt.resume = intel_legacy_submission_resume;
	else
		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
	}

	i915_timelines_init(dev_priv);

+1 −0
Original line number Diff line number Diff line
@@ -24,6 +24,7 @@ struct intel_context_ops {
	int (*pin)(struct intel_context *ce);
	void (*unpin)(struct intel_context *ce);

	void (*reset)(struct intel_context *ce);
	void (*destroy)(struct kref *kref);
};

+24 −0
Original line number Diff line number Diff line
@@ -753,6 +753,30 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
	return ret;
}

void intel_gt_resume(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * After resume we may need to poke into the pinned kernel contexts
	 * to paper over any damage caused by the sudden suspend. Only the
	 * kernel contexts should remain pinned over suspend; user contexts
	 * are fixed up on their first pin, so per engine only the kernel
	 * and preempt contexts need sanitizing here.
	 */
	for_each_engine(engine, i915, id) {
		struct intel_context *fixup[] = {
			engine->kernel_context,
			engine->preempt_context,
		};
		unsigned int i;

		for (i = 0; i < sizeof(fixup) / sizeof(fixup[0]); i++) {
			/* Either slot may be NULL (e.g. no preemption). */
			if (fixup[i])
				fixup[i]->ops->reset(fixup[i]);
		}
	}
}

/**
 * intel_engines_cleanup_common - cleans up the engine state created by
 *                                the common initializers.
+24 −25
Original line number Diff line number Diff line
@@ -1379,9 +1379,33 @@ static int execlists_context_pin(struct intel_context *ce)
	return __execlists_context_pin(ce, ce->engine);
}

static void execlists_context_reset(struct intel_context *ce)
{
	/*
	 * Because we emit WA_TAIL_DWORDS there may be a disparity
	 * between our bookkeeping in ce->ring->head and ce->ring->tail and
	 * that stored in context. As we only write new commands from
	 * ce->ring->tail onwards, everything before that is junk. If the GPU
	 * starts reading from its RING_HEAD from the context, it may try to
	 * execute that junk and die.
	 *
	 * The contexts that are still pinned on resume belong to the
	 * kernel, and are local to each engine. All other contexts will
	 * have their head/tail sanitized upon pinning before use, so they
	 * will never see garbage.
	 *
	 * So to avoid that we reset the context images upon resume. For
	 * simplicity, we just zero everything out.
	 */
	intel_ring_reset(ce->ring, 0);
	__execlists_update_reg_state(ce, ce->engine);
}

/*
 * Context operations for execlists submission. .reset is invoked on
 * resume (via intel_gt_resume) to sanitize the still-pinned kernel
 * contexts; .destroy is the kref release callback.
 */
static const struct intel_context_ops execlists_context_ops = {
	.pin = execlists_context_pin,
	.unpin = execlists_context_unpin,

	.reset = execlists_context_reset,
	.destroy = execlists_context_destroy,
};

@@ -2895,31 +2919,6 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
	return ret;
}

void intel_lr_context_resume(struct drm_i915_private *i915)
{
	struct i915_gem_context *gem_ctx;
	struct intel_context *ce;

	/*
	 * We emit WA_TAIL_DWORDS, so our bookkeeping in ce->ring->head and
	 * ce->ring->tail can disagree with the values stored in the context
	 * image. New commands are only written from ce->ring->tail onwards;
	 * everything before that point is junk, and if the GPU resumes
	 * reading from its saved RING_HEAD it may execute that junk and die.
	 *
	 * To avoid that, reset every active context image upon resume; for
	 * simplicity, just zero everything out.
	 */
	list_for_each_entry(gem_ctx, &i915->contexts.list, link) {
		list_for_each_entry(ce, &gem_ctx->active_engines, active_link) {
			/* An active context must have a backing ring. */
			GEM_BUG_ON(!ce->ring);
			intel_ring_reset(ce->ring, 0);
			__execlists_update_reg_state(ce, ce->engine);
		}
	}
}

void intel_execlists_show_requests(struct intel_engine_cs *engine,
				   struct drm_printer *m,
				   void (*show_request)(struct drm_printer *m,
Loading