
Commit 9f817501 authored by Sagar Arun Kamble, committed by Chris Wilson

drm/i915: Move rps.hw_lock to dev_priv and s/hw_lock/pcu_lock



In order to separate the GT PM related functionality into a new structure,
we are updating the rps structure. Its hw_lock is also used for
display-related PCU communication, hence move it to dev_priv.
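
As an illustration only (not the kernel code), here is a minimal, self-contained C sketch of the before/after layout: the struct and field names other than pcu_lock and rps are hypothetical, pthread_mutex_t stands in for struct mutex, and the members are trimmed down. The point is that the single mutex guarding RPS/RC6 register access and PCU communication now hangs off dev_priv rather than the rps sub-structure, so display code (e.g. the cdclk helpers in the diff below) no longer has to reach into rps just to talk to the PCU.

#include <pthread.h>
#include <stdio.h>

/* Trimmed-down, hypothetical stand-ins for the kernel structures;
 * pthread_mutex_t replaces struct mutex so the sketch builds in userspace. */
struct rps_state_sketch {
	unsigned int min_freq, max_freq;	/* placeholder fields */
	/* before this patch, "struct mutex hw_lock" lived here */
};

struct i915_private_sketch {
	/* after this patch: one lock, owned by dev_priv, covering RPS/RC6
	 * register access and all PCU (punit/pcode) communication */
	pthread_mutex_t pcu_lock;
	struct rps_state_sketch rps;
};

/* Any PCU request - an RPS frequency change or a display cdclk change -
 * now serializes on dev_priv->pcu_lock. */
static void pcu_request(struct i915_private_sketch *dev_priv, const char *what)
{
	pthread_mutex_lock(&dev_priv->pcu_lock);
	printf("PCU mailbox transaction: %s\n", what);	/* stand-in for pcode I/O */
	pthread_mutex_unlock(&dev_priv->pcu_lock);
}

int main(void)
{
	struct i915_private_sketch dev_priv = {
		.pcu_lock = PTHREAD_MUTEX_INITIALIZER,
		.rps = { .min_freq = 300, .max_freq = 1200 },	/* arbitrary values */
	};

	pcu_request(&dev_priv, "RPS softlimit update");
	pcu_request(&dev_priv, "cdclk change notification");
	return 0;
}

With the lock held at the dev_priv level, a later split of the RPS state into its own structure does not drag the display-side PCU users along with it.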

Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/1507360055-19948-8-git-send-email-sagar.a.kamble@intel.com


Acked-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171010213010.7415-7-chris@chris-wilson.co.uk
parent ad1443f0
drivers/gpu/drm/i915/i915_debugfs.c +12 −12
@@ -1097,7 +1097,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

-		mutex_lock(&dev_priv->rps.hw_lock);
+		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
@@ -1130,7 +1130,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
-		mutex_unlock(&dev_priv->rps.hw_lock);
+		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
@@ -1565,9 +1565,9 @@ static int gen6_drpc_info(struct seq_file *m)
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
@@ -1842,7 +1842,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)

	intel_runtime_pm_get(dev_priv);

-	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

@@ -1873,7 +1873,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
			   ((ia_freq >> 8) & 0xff) * 100);
	}

-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv);
@@ -4320,7 +4320,7 @@ i915_max_freq_set(void *data, u64 val)

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

-	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		return ret;

@@ -4333,7 +4333,7 @@ i915_max_freq_set(void *data, u64 val)
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
-		mutex_unlock(&dev_priv->rps.hw_lock);
+		mutex_unlock(&dev_priv->pcu_lock);
		return -EINVAL;
	}

@@ -4342,7 +4342,7 @@ i915_max_freq_set(void *data, u64 val)
	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);

	return 0;
}
@@ -4375,7 +4375,7 @@ i915_min_freq_set(void *data, u64 val)

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

-	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		return ret;

@@ -4389,7 +4389,7 @@ i915_min_freq_set(void *data, u64 val)

	if (val < hw_min ||
	    val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
-		mutex_unlock(&dev_priv->rps.hw_lock);
+		mutex_unlock(&dev_priv->pcu_lock);
		return -EINVAL;
	}

@@ -4398,7 +4398,7 @@ i915_min_freq_set(void *data, u64 val)
	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);

	return 0;
}
drivers/gpu/drm/i915/i915_drv.h +8 −8
@@ -1364,14 +1364,6 @@ struct intel_gen6_power_mgmt {

	/* manual wa residency calculations */
	struct intel_rps_ei ei;
-
-	/*
-	 * Protects RPS/RC6 register access and PCU communication.
-	 * Must be taken after struct_mutex if nested. Note that
-	 * this lock may be held for long periods of time when
-	 * talking to hw - so only take it when talking to hw!
-	 */
-	struct mutex hw_lock;
};

/* defined intel_pm.c */
@@ -2421,6 +2413,14 @@ struct drm_i915_private {
	/* Cannot be determined by PCIID. You must always read a register. */
	u32 edram_cap;

+	/*
+	 * Protects RPS/RC6 register access and PCU communication.
+	 * Must be taken after struct_mutex if nested. Note that
+	 * this lock may be held for long periods of time when
+	 * talking to hw - so only take it when talking to hw!
+	 */
+	struct mutex pcu_lock;
+
	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

drivers/gpu/drm/i915/i915_irq.c +2 −2
@@ -1181,7 +1181,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

@@ -1235,7 +1235,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
		dev_priv->rps.last_adj = 0;
	}

-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
drivers/gpu/drm/i915/i915_sysfs.c +10 −10
@@ -246,7 +246,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,

	intel_runtime_pm_get(dev_priv);

-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
@@ -261,7 +261,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		ret = intel_gpu_freq(dev_priv, ret);
	}
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);

	intel_runtime_pm_put(dev_priv);

@@ -304,9 +304,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
	if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
		return -EINVAL;

-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
	dev_priv->rps.boost_freq = val;
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);

	return count;
}
@@ -344,14 +344,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,

	intel_runtime_pm_get(dev_priv);

-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
-		mutex_unlock(&dev_priv->rps.hw_lock);
+		mutex_unlock(&dev_priv->pcu_lock);
		intel_runtime_pm_put(dev_priv);
		return -EINVAL;
	}
@@ -371,7 +371,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
	 * frequency request may be unchanged. */
	ret = intel_set_rps(dev_priv, val);

-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);

	intel_runtime_pm_put(dev_priv);

@@ -401,14 +401,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,

	intel_runtime_pm_get(dev_priv);

-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
-		mutex_unlock(&dev_priv->rps.hw_lock);
+		mutex_unlock(&dev_priv->pcu_lock);
		intel_runtime_pm_put(dev_priv);
		return -EINVAL;
	}
@@ -424,7 +424,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
	 * frequency request may be unchanged. */
	ret = intel_set_rps(dev_priv, val);

-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);

	intel_runtime_pm_put(dev_priv);

drivers/gpu/drm/i915/intel_cdclk.c +20 −20
@@ -503,7 +503,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
	else
		cmd = 0;

-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
@@ -513,7 +513,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);

	mutex_lock(&dev_priv->sb_lock);

@@ -590,7 +590,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
	 */
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
@@ -600,7 +600,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);

	intel_update_cdclk(dev_priv);

@@ -656,10 +656,10 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
		 "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
	if (ret) {
		DRM_ERROR("failed to inform pcode about cdclk change\n");
		return;
@@ -712,9 +712,9 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
			LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		DRM_ERROR("Switching back to LCPLL failed\n");

-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);

	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

@@ -928,12 +928,12 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,

	WARN_ON((cdclk == 24000) != (vco == 0));

-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				SKL_CDCLK_PREPARE_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE, 3);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
	if (ret) {
		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
			  ret);
@@ -975,9 +975,9 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
	POSTING_READ(CDCLK_CTL);

	/* inform PCU of the change */
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);

	intel_update_cdclk(dev_priv);
}
@@ -1268,10 +1268,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
	}

	/* Inform power controller of upcoming frequency change */
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
				      0x80000000);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);

	if (ret) {
		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
@@ -1300,10 +1300,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
	I915_WRITE(CDCLK_CTL, val);

-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
				      DIV_ROUND_UP(cdclk, 25000));
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);

	if (ret) {
		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
@@ -1518,12 +1518,12 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
	u32 val, divider, pcu_ack;
	int ret;

-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				SKL_CDCLK_PREPARE_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE, 3);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
	if (ret) {
		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
			  ret);
@@ -1575,9 +1575,9 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
	I915_WRITE(CDCLK_CTL, val);

	/* inform PCU of the change */
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);

	intel_update_cdclk(dev_priv);
}