Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ebb5eb7d authored by Chris Wilson
Browse files

drm/i915: Replace pcu_lock with sb_lock



We now have two locks for sideband access. The general one covering
sideband access across all generations, sb_lock, and a specific one
covering sideband access via the punit on vlv/chv. After lifting the
sb_lock around the punit into the callers, the pcu_lock is now redundant
and can be separated from its other use to regulate RPS (essentially
giving RPS a lock all of its own).

v2: Extract a couple of minor bug fixes.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190426081725.31217-4-chris@chris-wilson.co.uk
parent 337fa6e0
Loading
Loading
Loading
Loading
+4 −21
Original line number Diff line number Diff line
@@ -1046,8 +1046,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
@@ -1082,7 +1080,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
@@ -1487,12 +1484,9 @@ static int gen6_drpc_info(struct seq_file *m)
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7) {
		mutex_lock(&dev_priv->pcu_lock);
	if (INTEL_GEN(dev_priv) <= 7)
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
@@ -1756,17 +1750,10 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
@@ -1777,6 +1764,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	wakeref = intel_runtime_pm_get(dev_priv);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
@@ -1790,12 +1778,9 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
@@ -2032,13 +2017,11 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)

	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			mutex_lock(&dev_priv->pcu_lock);
			vlv_punit_get(dev_priv);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			vlv_punit_put(dev_priv);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
+2 −8
Original line number Diff line number Diff line
@@ -648,6 +648,8 @@ struct intel_rps_ei {
};

struct intel_rps {
	struct mutex lock; /* protects enabling and the worker */

	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
@@ -1710,14 +1712,6 @@ struct drm_i915_private {
	 */
	u32 edram_size_mb;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested. Note that
	 * this lock may be held for long periods of time when
	 * talking to hw - so only take it when talking to hw!
	 */
	struct mutex pcu_lock;

	/* gen6+ GT PM state */
	struct intel_gen6_power_mgmt gt_pm;

+2 −2
Original line number Diff line number Diff line
@@ -1301,7 +1301,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->pcu_lock);
	mutex_lock(&rps->lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

@@ -1367,7 +1367,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
		rps->last_adj = 0;
	}

	mutex_unlock(&dev_priv->pcu_lock);
	mutex_unlock(&rps->lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
+12 −20
Original line number Diff line number Diff line
@@ -263,7 +263,6 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,

	wakeref = intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->pcu_lock);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_punit_get(dev_priv);
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
@@ -273,7 +272,6 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
	} else {
		freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
	}
	mutex_unlock(&dev_priv->pcu_lock);

	intel_runtime_pm_put(dev_priv, wakeref);

@@ -318,12 +316,12 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
	if (val < rps->min_freq || val > rps->max_freq)
		return -EINVAL;

	mutex_lock(&dev_priv->pcu_lock);
	mutex_lock(&rps->lock);
	if (val != rps->boost_freq) {
		rps->boost_freq = val;
		boost = atomic_read(&rps->num_waiters);
	}
	mutex_unlock(&dev_priv->pcu_lock);
	mutex_unlock(&rps->lock);
	if (boost)
		schedule_work(&rps->work);

@@ -364,17 +362,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
		return ret;

	wakeref = intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->pcu_lock);
	mutex_lock(&rps->lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val < rps->min_freq_softlimit) {
		mutex_unlock(&dev_priv->pcu_lock);
		intel_runtime_pm_put(dev_priv, wakeref);
		return -EINVAL;
		ret = -EINVAL;
		goto unlock;
	}

	if (val > rps->rp0_freq)
@@ -392,8 +387,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
	 * frequency request may be unchanged. */
	ret = intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->pcu_lock);

unlock:
	mutex_unlock(&rps->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return ret ?: count;
@@ -423,17 +418,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
		return ret;

	wakeref = intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->pcu_lock);
	mutex_lock(&rps->lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val > rps->max_freq_softlimit) {
		mutex_unlock(&dev_priv->pcu_lock);
		intel_runtime_pm_put(dev_priv, wakeref);
		return -EINVAL;
		ret = -EINVAL;
		goto unlock;
	}

	rps->min_freq_softlimit = val;
@@ -447,8 +439,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
	 * frequency request may be unchanged. */
	ret = intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->pcu_lock);

unlock:
	mutex_unlock(&rps->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return ret ?: count;
+0 −28
Original line number Diff line number Diff line
@@ -464,7 +464,6 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
{
	u32 val;

	mutex_lock(&dev_priv->pcu_lock);
	vlv_iosf_sb_get(dev_priv,
			BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));

@@ -477,7 +476,6 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,

	vlv_iosf_sb_put(dev_priv,
			BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
	mutex_unlock(&dev_priv->pcu_lock);

	if (IS_VALLEYVIEW(dev_priv))
		cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK) >>
@@ -556,7 +554,6 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
			BIT(VLV_IOSF_SB_BUNIT) |
			BIT(VLV_IOSF_SB_PUNIT));

	mutex_lock(&dev_priv->pcu_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
@@ -566,7 +563,6 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->pcu_lock);

	if (cdclk == 400000) {
		u32 divider;
@@ -639,7 +635,6 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
	 */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	mutex_lock(&dev_priv->pcu_lock);
	vlv_punit_get(dev_priv);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	val &= ~DSPFREQGUAR_MASK_CHV;
@@ -652,7 +647,6 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
	}

	vlv_punit_put(dev_priv);
	mutex_unlock(&dev_priv->pcu_lock);

	intel_update_cdclk(dev_priv);

@@ -731,10 +725,8 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
		 "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	mutex_lock(&dev_priv->pcu_lock);
	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	mutex_unlock(&dev_priv->pcu_lock);
	if (ret) {
		DRM_ERROR("failed to inform pcode about cdclk change\n");
		return;
@@ -783,10 +775,8 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
			LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		DRM_ERROR("Switching back to LCPLL failed\n");

	mutex_lock(&dev_priv->pcu_lock);
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
				cdclk_state->voltage_level);
	mutex_unlock(&dev_priv->pcu_lock);

	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

@@ -1025,12 +1015,10 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
	 */
	WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);

	mutex_lock(&dev_priv->pcu_lock);
	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				SKL_CDCLK_PREPARE_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE, 3);
	mutex_unlock(&dev_priv->pcu_lock);
	if (ret) {
		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
			  ret);
@@ -1094,10 +1082,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
	POSTING_READ(CDCLK_CTL);

	/* inform PCU of the change */
	mutex_lock(&dev_priv->pcu_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				cdclk_state->voltage_level);
	mutex_unlock(&dev_priv->pcu_lock);

	intel_update_cdclk(dev_priv);
}
@@ -1394,12 +1380,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
	 * requires us to wait up to 150usec, but that leads to timeouts;
	 * the 2ms used here is based on experiment.
	 */
	mutex_lock(&dev_priv->pcu_lock);
	ret = sandybridge_pcode_write_timeout(dev_priv,
					      HSW_PCODE_DE_WRITE_FREQ_REQ,
					      0x80000000, 150, 2);
	mutex_unlock(&dev_priv->pcu_lock);

	if (ret) {
		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
			  ret, cdclk);
@@ -1429,7 +1412,6 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
	if (pipe != INVALID_PIPE)
		intel_wait_for_vblank(dev_priv, pipe);

	mutex_lock(&dev_priv->pcu_lock);
	/*
	 * The timeout isn't specified, the 2ms used here is based on
	 * experiment.
@@ -1439,8 +1421,6 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
	ret = sandybridge_pcode_write_timeout(dev_priv,
					      HSW_PCODE_DE_WRITE_FREQ_REQ,
					      cdclk_state->voltage_level, 150, 2);
	mutex_unlock(&dev_priv->pcu_lock);

	if (ret) {
		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
			  ret, cdclk);
@@ -1663,12 +1643,10 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
	u32 val, divider;
	int ret;

	mutex_lock(&dev_priv->pcu_lock);
	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				SKL_CDCLK_PREPARE_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE, 3);
	mutex_unlock(&dev_priv->pcu_lock);
	if (ret) {
		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
			  ret);
@@ -1707,10 +1685,8 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
		intel_wait_for_vblank(dev_priv, pipe);

	/* inform PCU of the change */
	mutex_lock(&dev_priv->pcu_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				cdclk_state->voltage_level);
	mutex_unlock(&dev_priv->pcu_lock);

	intel_update_cdclk(dev_priv);

@@ -1849,12 +1825,10 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
	unsigned int vco = cdclk_state->vco;
	int ret;

	mutex_lock(&dev_priv->pcu_lock);
	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				SKL_CDCLK_PREPARE_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE, 3);
	mutex_unlock(&dev_priv->pcu_lock);
	if (ret) {
		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
			  ret);
@@ -1876,10 +1850,8 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
	I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
			      skl_cdclk_decimal(cdclk));

	mutex_lock(&dev_priv->pcu_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				cdclk_state->voltage_level);
	mutex_unlock(&dev_priv->pcu_lock);

	intel_update_cdclk(dev_priv);

Loading