
Commit 3b19f16a authored by Chris Wilson

drm/i915: Drain the device workqueue on unload

Workers on the i915->wq may rearm themselves, so for completeness we need
to replace our flush_workqueue() with a call to drain_workqueue() before
unloading the device.
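
For illustration, a minimal kernel-style sketch (hypothetical names wq,
rearm_fn and stopping, not part of this patch) of why a self-rearming
worker defeats flush_workqueue(): the flush only waits for work queued
before it was called, while drain_workqueue() re-flushes until the
queue is empty.

#include <linux/workqueue.h>

static struct workqueue_struct *wq;
static bool stopping;

static void rearm_fn(struct work_struct *work)
{
	/* Each run queues itself again, so one flush can return while
	 * a fresh instance is already pending.
	 */
	if (!READ_ONCE(stopping))
		queue_work(wq, work);
}
static DECLARE_WORK(rearm_work, rearm_fn);

static void example_unload(void)
{
	WRITE_ONCE(stopping, true);
	drain_workqueue(wq);	/* flush_workqueue(wq) could return too early */
	destroy_workqueue(wq);
}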

v2: Reinforce the drain_workqueue with a preceding rcu_barrier(), as a
few of the tasks that need to be drained may first be armed by RCU.
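
A similar hedged sketch (again with invented names) of work that is
"armed by RCU": it is only queued from an RCU callback, so a bare
drain_workqueue() can complete before the callback has queued anything.
rcu_barrier() waits for pending RCU callbacks to run, which is why it
precedes the drain.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq;

struct obj {
	struct rcu_head rcu;
	struct work_struct free_work;
};

static void free_fn(struct work_struct *work)
{
	kfree(container_of(work, struct obj, free_work));
}

static void obj_rcu_cb(struct rcu_head *rcu)
{
	struct obj *o = container_of(rcu, struct obj, rcu);

	INIT_WORK(&o->free_work, free_fn);
	queue_work(wq, &o->free_work);	/* work armed by RCU */
}

/* The release path runs call_rcu(&o->rcu, obj_rcu_cb); teardown must
 * therefore rcu_barrier() before drain_workqueue(wq), or the drain can
 * miss the not-yet-queued free_work.
 */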

References: https://bugs.freedesktop.org/show_bug.cgi?id=101627


Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170718134124.14832-1-chris@chris-wilson.co.uk


Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
parent 023f8079
drivers/gpu/drm/i915/i915_drv.c  +2 −4
@@ -596,7 +596,8 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {

 static void i915_gem_fini(struct drm_i915_private *dev_priv)
 {
-	flush_workqueue(dev_priv->wq);
+	/* Flush any outstanding unpin_work. */
+	i915_gem_drain_workqueue(dev_priv);
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	intel_uc_fini_hw(dev_priv);
@@ -1413,9 +1414,6 @@ void i915_driver_unload(struct drm_device *dev)
 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
 	i915_reset_error_state(dev_priv);
 
-	/* Flush any outstanding unpin_work. */
-	drain_workqueue(dev_priv->wq);
-
 	i915_gem_fini(dev_priv);
 	intel_uc_fini_fw(dev_priv);
 	intel_fbc_cleanup_cfb(dev_priv);
drivers/gpu/drm/i915/i915_drv.h  +20 −0
@@ -3300,6 +3300,26 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
 	} while (flush_work(&i915->mm.free_work));
 }
 
+static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+{
+	/*
+	 * Similar to the objects above (see i915_gem_drain_freed_objects()),
+	 * in general we have workers that are armed by RCU and then rearm
+	 * themselves in their callbacks. To be paranoid, we need to
+	 * drain the workqueue a second time after waiting for the RCU
+	 * grace period so that we catch work queued via RCU from the first
+	 * pass. As neither drain_workqueue() nor flush_workqueue() report
+	 * a result, we make the assumption that we require no more
+	 * than 2 passes to catch all recursive RCU delayed work.
+	 *
+	 */
+	int pass = 2;
+	do {
+		rcu_barrier();
+		drain_workqueue(i915->wq);
+	} while (--pass);
+}
+
 struct i915_vma * __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
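
To see why two passes are assumed to be enough, consider the worst-case
chain the loop above handles (a hypothetical trace, not from the patch):

	pass 1: rcu_barrier()     runs cb_a(), which queues work_a
	        drain_workqueue() runs work_a(), which calls call_rcu(..., cb_b)
	pass 2: rcu_barrier()     runs cb_b(), which queues work_b
	        drain_workqueue() runs work_b()

A further call_rcu() issued from work_b() would be missed; that is the
"no more than 2 passes" assumption the comment makes explicit.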
drivers/gpu/drm/i915/selftests/mock_gem_device.c  +1 −1
@@ -57,7 +57,7 @@ static void mock_device_release(struct drm_device *dev)

 	cancel_delayed_work_sync(&i915->gt.retire_work);
 	cancel_delayed_work_sync(&i915->gt.idle_work);
-	flush_workqueue(i915->wq);
+	i915_gem_drain_workqueue(i915);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	for_each_engine(engine, i915, id)