Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 37cd3300 authored by Chris Wilson
Browse files

drm/i915: Remove redundant intel_autoenable_gt_powersave()



Now that we always execute a context switch upon module load, there is
no need to queue a delayed task for doing so. The purpose of the delayed
task is to enable GT powersaving, for which we need the HW state to be
valid (i.e. having loaded a context and initialised basic state). We
used to defer this operation as historically it was slow (due to slow
register polling, fixed with commit 1758b90e ("drm/i915: Use a hybrid
scheme for fast register waits")) but now we have a requirement to save
the default HW state.

v2: Load the kernel context (to provide the power context) upon resume.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171112112738.1463-3-chris@chris-wilson.co.uk
parent 556fe36d
Loading
Loading
Loading
Loading
+1 −10
Original line number Diff line number Diff line
@@ -1683,8 +1683,6 @@ static int i915_drm_resume(struct drm_device *dev)

	intel_csr_ucode_resume(dev_priv);

	i915_gem_resume(dev_priv);

	i915_restore_state(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);
	intel_opregion_setup(dev_priv);
@@ -1705,12 +1703,7 @@ static int i915_drm_resume(struct drm_device *dev)

	drm_mode_config_reset(dev);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev_priv)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		i915_gem_set_wedged(dev_priv);
	}
	mutex_unlock(&dev->struct_mutex);
	i915_gem_resume(dev_priv);

	intel_guc_resume(dev_priv);

@@ -1745,8 +1738,6 @@ static int i915_drm_resume(struct drm_device *dev)

	intel_opregion_notify_adapter(dev_priv, PCI_D0);

	intel_autoenable_gt_powersave(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
+0 −1
Original line number Diff line number Diff line
@@ -1392,7 +1392,6 @@ struct intel_gen6_power_mgmt {
	struct intel_rps rps;
	struct intel_rc6 rc6;
	struct intel_llc_pstate llc_pstate;
	struct delayed_work autoenable_work;
};

/* defined intel_pm.c */
+23 −8
Original line number Diff line number Diff line
@@ -4806,23 +4806,38 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
	return ret;
}

void i915_gem_resume(struct drm_i915_private *dev_priv)
void i915_gem_resume(struct drm_i915_private *i915)
{
	struct drm_device *dev = &dev_priv->drm;
	WARN_ON(i915->gt.awake);

	WARN_ON(dev_priv->gt.awake);
	mutex_lock(&i915->drm.struct_mutex);
	intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev_priv);
	i915_gem_restore_fences(dev_priv);
	i915_gem_restore_gtt_mappings(i915);
	i915_gem_restore_fences(i915);

	/* As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	dev_priv->gt.resume(dev_priv);
	i915->gt.resume(i915);

	mutex_unlock(&dev->struct_mutex);
	if (i915_gem_init_hw(i915))
		goto err_wedged;

	/* Always reload a context for powersaving. */
	if (i915_gem_switch_to_kernel_context(i915))
		goto err_wedged;

out_unlock:
	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
	mutex_unlock(&i915->drm.struct_mutex);
	return;

err_wedged:
	DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
	i915_gem_set_wedged(i915);
	goto out_unlock;
}

void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
+0 −1
Original line number Diff line number Diff line
@@ -1880,7 +1880,6 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
+0 −66
Original line number Diff line number Diff line
@@ -7959,8 +7959,6 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
	rps->boost_freq = rps->max_freq;

	mutex_unlock(&dev_priv->pcu_lock);

	intel_autoenable_gt_powersave(dev_priv);
}

void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
@@ -7985,9 +7983,6 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
	if (INTEL_GEN(dev_priv) < 6)
		return;

	if (cancel_delayed_work_sync(&dev_priv->gt_pm.autoenable_work))
		intel_runtime_pm_put(dev_priv);

	/* gen6_rps_idle() will be called later to disable interrupts */
}

@@ -8146,65 +8141,6 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
	mutex_unlock(&dev_priv->pcu_lock);
}

static void __intel_autoenable_gt_powersave(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work,
			     typeof(*dev_priv),
			     gt_pm.autoenable_work.work);
	struct intel_engine_cs *rcs;
	struct drm_i915_gem_request *req;

	rcs = dev_priv->engine[RCS];
	if (rcs->last_retired_context)
		goto out;

	if (!rcs->init_context)
		goto out;

	mutex_lock(&dev_priv->drm.struct_mutex);

	req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
	if (IS_ERR(req))
		goto unlock;

	if (!i915_modparams.enable_execlists && i915_switch_context(req) == 0)
		rcs->init_context(req);

	/* Mark the device busy, calling intel_enable_gt_powersave() */
	i915_add_request(req);

unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out:
	intel_runtime_pm_put(dev_priv);
}

void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		intel_init_emon(dev_priv);
	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (queue_delayed_work(dev_priv->wq,
				       &dev_priv->gt_pm.autoenable_work,
				       round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}

static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
@@ -9435,8 +9371,6 @@ void intel_pm_setup(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->pcu_lock);

	INIT_DELAYED_WORK(&dev_priv->gt_pm.autoenable_work,
			  __intel_autoenable_gt_powersave);
	atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);

	dev_priv->runtime_pm.suspended = false;