Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dcff85c8 authored by Chris Wilson
Browse files

drm/i915: Enable i915_gem_wait_for_idle() without holding struct_mutex



The principal motivation for this was to try and eliminate the
struct_mutex from i915_gem_suspend - but we still need to hold the mutex
currently for the i915_gem_context_lost(). (The issue there is that there
may be an indirect lockdep cycle between cpu_hotplug (i.e. suspend) and
struct_mutex via the stop_machine().) For the moment, enabling last
request tracking for the engine, allows us to do busyness checking and
waiting without requiring the struct_mutex - which is useful in its own
right.

As a side-effect of having a robust means for tracking engine busyness,
we can replace our other busyness heuristic, that of comparing against
the last submitted seqno. For paranoid reasons, we have a semi-ordered
check of that seqno inside the hangchecker, which we can now improve to
an ordered check of the engine's busyness (removing a locked xchg in the
process).

v2: Pass along "bool interruptible" as being unlocked we cannot rely on
i915->mm.interruptible being stable or even under our control.
v3: Replace the Ironlake i915_gpu_busy() check with the common precalculated value

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470388464-28458-6-git-send-email-chris@chris-wilson.co.uk
parent 90f4fcd5
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -4925,7 +4925,7 @@ i915_drop_caches_set(void *data, u64 val)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gem_wait_for_idle(dev_priv);
		ret = i915_gem_wait_for_idle(dev_priv, true);
		if (ret)
			goto unlock;
	}
+2 −1
Original line number Diff line number Diff line
@@ -3233,7 +3233,8 @@ int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_engines(struct drm_device *dev);
int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
					bool interruptible);
int __must_check i915_gem_suspend(struct drm_device *dev);
void i915_gem_resume(struct drm_device *dev);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+14 −17
Original line number Diff line number Diff line
@@ -2438,13 +2438,18 @@ static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)

static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;
	struct intel_ring *ring;

	request = i915_gem_active_peek(&engine->last_request,
				       &engine->i915->drm.struct_mutex);

	/* Mark all pending requests as complete so that any concurrent
	 * (lockless) lookup doesn't try and wait upon the request as we
	 * reset it.
	 */
	intel_engine_init_seqno(engine, engine->last_submitted_seqno);
	if (request)
		intel_engine_init_seqno(engine, request->fence.seqno);

	/*
	 * Clear the execlists queue up before freeing the requests, as those
@@ -2466,15 +2471,9 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
	 * implicit references on things like e.g. ppgtt address spaces through
	 * the request.
	 */
	if (!list_empty(&engine->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_last_entry(&engine->request_list,
					  struct drm_i915_gem_request,
					  link);

	if (request)
		i915_gem_request_retire_upto(request);
	}
	GEM_BUG_ON(intel_engine_is_active(engine));

	/* Having flushed all requests from all queues, we know that all
	 * ringbuffers must now be empty. However, since we do not reclaim
@@ -2897,18 +2896,17 @@ int i915_vma_unbind(struct i915_vma *vma)
	return 0;
}

int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
			   bool interruptible)
{
	struct intel_engine_cs *engine;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv) {
		if (engine->last_context == NULL)
			continue;

		ret = intel_engine_idle(engine);
		ret = intel_engine_idle(engine, interruptible);
		if (ret)
			return ret;
	}
@@ -4080,11 +4078,10 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
	return NULL;
}

int
i915_gem_suspend(struct drm_device *dev)
int i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;
	int ret;

	intel_suspend_gt_powersave(dev_priv);

@@ -4102,7 +4099,7 @@ i915_gem_suspend(struct drm_device *dev)
	if (ret)
		goto err;

	ret = i915_gem_wait_for_idle(dev_priv);
	ret = i915_gem_wait_for_idle(dev_priv, true);
	if (ret)
		goto err;

+3 −3
Original line number Diff line number Diff line
@@ -39,7 +39,7 @@ gpu_is_idle(struct drm_i915_private *dev_priv)
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv) {
		if (!list_empty(&engine->request_list))
		if (intel_engine_is_active(engine))
			return false;
	}

@@ -167,7 +167,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
	if (ret)
		return ret;

	ret = i915_gem_wait_for_idle(dev_priv);
	ret = i915_gem_wait_for_idle(dev_priv, true);
	if (ret)
		return ret;

@@ -272,7 +272,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
				return ret;
		}

		ret = i915_gem_wait_for_idle(dev_priv);
		ret = i915_gem_wait_for_idle(dev_priv, true);
		if (ret)
			return ret;

+1 −1
Original line number Diff line number Diff line
@@ -2248,7 +2248,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)

	if (unlikely(ggtt->do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gem_wait_for_idle(dev_priv)) {
		if (i915_gem_wait_for_idle(dev_priv, false)) {
			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
Loading