Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ed54c1a1 authored by Dave Gordon, committed by Daniel Vetter
Browse files

drm/i915: abolish separate per-ring default_context pointers



Now that we've eliminated a lot of uses of ring->default_context,
we can eliminate the pointer itself.

All the engines share the same default intel_context, so we can just
keep a single reference to it in the dev_priv structure rather than one
in each of the engine[] elements. This makes refcounting more sensible
too, as we now have a refcount of one for the one pointer, rather than
a refcount of one but multiple pointers.

From an idea by Chris Wilson.

v2:	transform an extra instance of ring->default_context introduced by
    42f1cae8 drm/i915: Restore inhibiting the load of the default context
    That patch's commentary includes:
	v2: Mark the global default context as uninitialized on GPU reset so
	    that the context-local workarounds are reloaded upon re-enabling
    The code implementing that now also benefits from the replacement of
    the multiple (per-ring) pointers to the default context with a single
    pointer to the unique kernel context.

v4:	Rebased, remove underused local (Nick Hoath)

Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Reviewed-by: Nick Hoath <nicholas.hoath@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1453230175-19330-3-git-send-email-david.s.gordon@intel.com


Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 26827088
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -1961,7 +1961,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
		seq_puts(m, "HW context ");
		describe_ctx(m, ctx);
		for_each_ring(ring, dev_priv, i) {
			if (ring->default_context == ctx)
			if (dev_priv->kernel_context == ctx)
				seq_printf(m, "(default context %s) ",
					   ring->name);
		}
@@ -2058,7 +2058,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		for_each_ring(ring, dev_priv, i) {
			if (ring->default_context != ctx)
			if (dev_priv->kernel_context != ctx)
				i915_dump_lrc_obj(m, ctx, ring);
		}
	}
+2 −0
Original line number Diff line number Diff line
@@ -1948,6 +1948,8 @@ struct drm_i915_private {
		void (*stop_ring)(struct intel_engine_cs *ring);
	} gt;

	struct intel_context *kernel_context;

	bool edp_low_vswing;

	/* perform PHY state sanity checks? */
+3 −3
Original line number Diff line number Diff line
@@ -2680,7 +2680,7 @@ void i915_gem_request_free(struct kref *req_ref)

	if (ctx) {
		if (i915.enable_execlists) {
			if (ctx != req->ring->default_context)
			if (ctx != req->i915->kernel_context)
				intel_lr_context_unpin(req);
		}

@@ -2776,7 +2776,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
	int err;

	if (ctx == NULL)
		ctx = engine->default_context;
		ctx = to_i915(engine->dev)->kernel_context;
	err = __i915_gem_request_alloc(engine, ctx, &req);
	return err ? ERR_PTR(err) : req;
}
@@ -4864,7 +4864,7 @@ i915_gem_init_hw(struct drm_device *dev)
	 */
	init_unused_rings(dev);

	BUG_ON(!dev_priv->ring[RCS].default_context);
	BUG_ON(!dev_priv->kernel_context);

	ret = i915_ppgtt_init_hw(dev);
	if (ret) {
+11 −18
Original line number Diff line number Diff line
@@ -347,22 +347,20 @@ void i915_gem_context_reset(struct drm_device *dev)
			i915_gem_context_unreference(lctx);
			ring->last_context = NULL;
		}
	}

	/* Force the GPU state to be reinitialised on enabling */
		if (ring->default_context)
			ring->default_context->legacy_hw_ctx.initialized = false;
	}
	dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
	if (WARN_ON(dev_priv->kernel_context))
		return 0;

	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
@@ -392,12 +390,7 @@ int i915_gem_context_init(struct drm_device *dev)
		return PTR_ERR(ctx);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = ctx;
	}
	dev_priv->kernel_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
@@ -408,7 +401,7 @@ int i915_gem_context_init(struct drm_device *dev)
void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
	struct intel_context *dctx = dev_priv->kernel_context;
	int i;

	if (dctx->legacy_hw_ctx.rcs_state) {
@@ -435,17 +428,17 @@ void i915_gem_context_fini(struct drm_device *dev)
		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
	for (i = I915_NUM_RINGS; --i >= 0;) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
		if (ring->last_context) {
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
			ring->last_context = NULL;
		}
	}

	i915_gem_context_unreference(dctx);
	dev_priv->kernel_context = NULL;
}

int i915_gem_context_enable(struct drm_i915_gem_request *req)
+1 −1
Original line number Diff line number Diff line
@@ -1050,7 +1050,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
			if (request)
				rbuf = request->ctx->engine[ring->id].ringbuf;
			else
				rbuf = ring->default_context->engine[ring->id].ringbuf;
				rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
		} else
			rbuf = ring->buffer;

Loading