
Commit 562d9bae authored by Sagar Arun Kamble, committed by Chris Wilson

drm/i915: Name structure in dev_priv that contains RPS/RC6 state as "gt_pm"



Prepared a substructure, rps, for RPS-related state. autoenable_work is
used for RC6 too, hence it is defined outside the rps structure. As part
of this change, many functions are refactored to use a local struct
intel_rps *rps pointer to access RPS-related members; accordingly,
intel_rps_client pointer variables are renamed to rps_client in various
functions to avoid clashing with the new local rps pointers.

v2: Rebase.

v3: s/pm/gt_pm (Chris)
Refactored access to the rps structure by declaring a struct intel_rps *
in many functions.
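
As a rough illustration (not part of the patch itself), the refactoring
changes the access pattern roughly as sketched below; the field names
mirror those touched in the diff, while the helper function is purely
hypothetical:

	/* Hypothetical helper, for illustration only: RPS state now lives in
	 * dev_priv->gt_pm.rps, and functions that touch several fields cache
	 * a local pointer instead of repeating the full path each time. */
	static void example_clamp_to_softlimit(struct drm_i915_private *dev_priv)
	{
		struct intel_rps *rps = &dev_priv->gt_pm.rps;

		/* Before: dev_priv->rps.cur_freq
		 * After:  rps->cur_freq (or dev_priv->gt_pm.rps.cur_freq for
		 * one-off accesses, as in i915_max_freq_get()). */
		if (rps->cur_freq > rps->max_freq_softlimit)
			rps->cur_freq = rps->max_freq_softlimit;
	}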

Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com> #1
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/1507360055-19948-9-git-send-email-sagar.a.kamble@intel.com


Acked-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171010213010.7415-8-chris@chris-wilson.co.uk
parent 9f817501
+50 −49
@@ -1080,6 +1080,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	int ret = 0;
 
 	intel_runtime_pm_get(dev_priv);
@@ -1116,20 +1117,20 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
 
 		seq_printf(m, "current GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+			   intel_gpu_freq(dev_priv, rps->cur_freq));
 
 		seq_printf(m, "max GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+			   intel_gpu_freq(dev_priv, rps->max_freq));
 
 		seq_printf(m, "min GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+			   intel_gpu_freq(dev_priv, rps->min_freq));
 
 		seq_printf(m, "idle GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+			   intel_gpu_freq(dev_priv, rps->idle_freq));
 
 		seq_printf(m,
 			   "efficient (RPe) frequency: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+			   intel_gpu_freq(dev_priv, rps->efficient_freq));
 		mutex_unlock(&dev_priv->pcu_lock);
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		u32 rp_state_limits;
@@ -1210,7 +1211,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
 			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
 		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
-			   dev_priv->rps.pm_intrmsk_mbz);
+			   rps->pm_intrmsk_mbz);
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 		seq_printf(m, "Render p-state ratio: %d\n",
 			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
@@ -1230,8 +1231,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
 		seq_printf(m, "RP PREV UP: %d (%dus)\n",
 			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
-		seq_printf(m, "Up threshold: %d%%\n",
-			   dev_priv->rps.up_threshold);
+		seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);
 
 		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
 			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
@@ -1239,8 +1239,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
 		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
 			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
-		seq_printf(m, "Down threshold: %d%%\n",
-			   dev_priv->rps.down_threshold);
+		seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);
 
 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
 			    rp_state_cap >> 16) & 0xff;
@@ -1262,22 +1261,22 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+			   intel_gpu_freq(dev_priv, rps->max_freq));
 
 		seq_printf(m, "Current freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+			   intel_gpu_freq(dev_priv, rps->cur_freq));
 		seq_printf(m, "Actual freq: %d MHz\n", cagf);
 		seq_printf(m, "Idle freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+			   intel_gpu_freq(dev_priv, rps->idle_freq));
 		seq_printf(m, "Min freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+			   intel_gpu_freq(dev_priv, rps->min_freq));
 		seq_printf(m, "Boost freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
+			   intel_gpu_freq(dev_priv, rps->boost_freq));
 		seq_printf(m, "Max freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+			   intel_gpu_freq(dev_priv, rps->max_freq));
 		seq_printf(m,
 			   "efficient (RPe) frequency: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+			   intel_gpu_freq(dev_priv, rps->efficient_freq));
 	} else {
 		seq_puts(m, "no P-state info available\n");
 	}
@@ -1831,6 +1830,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	int ret = 0;
 	int gpu_freq, ia_freq;
 	unsigned int max_gpu_freq, min_gpu_freq;
@@ -1848,13 +1848,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 
 	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
 		/* Convert GT frequency to 50 HZ units */
-		min_gpu_freq =
-			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
-		max_gpu_freq =
-			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
+		min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER;
+		max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER;
 	} else {
-		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
-		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
+		min_gpu_freq = rps->min_freq_softlimit;
+		max_gpu_freq = rps->max_freq_softlimit;
 	}
 
 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
@@ -2307,25 +2305,26 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_device *dev = &dev_priv->drm;
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	struct drm_file *file;
 
-	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
+	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
 	seq_printf(m, "GPU busy? %s [%d requests]\n",
 		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
 	seq_printf(m, "Boosts outstanding? %d\n",
-		   atomic_read(&dev_priv->rps.num_waiters));
+		   atomic_read(&rps->num_waiters));
 	seq_printf(m, "Frequency requested %d\n",
-		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+		   intel_gpu_freq(dev_priv, rps->cur_freq));
 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
-		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
-		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
-		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
-		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+		   intel_gpu_freq(dev_priv, rps->min_freq),
+		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
+		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
+		   intel_gpu_freq(dev_priv, rps->max_freq));
 	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
-		   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
-		   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-		   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
+		   intel_gpu_freq(dev_priv, rps->idle_freq),
+		   intel_gpu_freq(dev_priv, rps->efficient_freq),
+		   intel_gpu_freq(dev_priv, rps->boost_freq));
 
 	mutex_lock(&dev->filelist_mutex);
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
@@ -2337,15 +2336,15 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 		seq_printf(m, "%s [%d]: %d boosts\n",
 			   task ? task->comm : "<unknown>",
 			   task ? task->pid : -1,
-			   atomic_read(&file_priv->rps.boosts));
+			   atomic_read(&file_priv->rps_client.boosts));
 		rcu_read_unlock();
 	}
 	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
-		   atomic_read(&dev_priv->rps.boosts));
+		   atomic_read(&rps->boosts));
 	mutex_unlock(&dev->filelist_mutex);
 
 	if (INTEL_GEN(dev_priv) >= 6 &&
-	    dev_priv->rps.enabled &&
+	    rps->enabled &&
 	    dev_priv->gt.active_requests) {
 		u32 rpup, rpupei;
 		u32 rpdown, rpdownei;
@@ -2358,13 +2357,13 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
 		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
-			   rps_power_to_str(dev_priv->rps.power));
+			   rps_power_to_str(rps->power));
 		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
 			   rpup && rpupei ? 100 * rpup / rpupei : 0,
-			   dev_priv->rps.up_threshold);
+			   rps->up_threshold);
 		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
 			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
-			   dev_priv->rps.down_threshold);
+			   rps->down_threshold);
 	} else {
 		seq_puts(m, "\nRPS Autotuning inactive\n");
 	}
@@ -4304,7 +4303,7 @@ i915_max_freq_get(void *data, u64 *val)
 	if (INTEL_GEN(dev_priv) < 6)
 		return -ENODEV;
 
-	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
+	*val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.max_freq_softlimit);
 	return 0;
 }
 
@@ -4312,6 +4311,7 @@ static int
 i915_max_freq_set(void *data, u64 val)
 {
 	struct drm_i915_private *dev_priv = data;
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	u32 hw_max, hw_min;
 	int ret;
 
@@ -4329,15 +4329,15 @@ i915_max_freq_set(void *data, u64 val)
 	 */
 	val = intel_freq_opcode(dev_priv, val);
 
-	hw_max = dev_priv->rps.max_freq;
-	hw_min = dev_priv->rps.min_freq;
+	hw_max = rps->max_freq;
+	hw_min = rps->min_freq;
 
-	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
+	if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) {
 		mutex_unlock(&dev_priv->pcu_lock);
 		return -EINVAL;
 	}
 
-	dev_priv->rps.max_freq_softlimit = val;
+	rps->max_freq_softlimit = val;
 
 	if (intel_set_rps(dev_priv, val))
 		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
@@ -4359,7 +4359,7 @@ i915_min_freq_get(void *data, u64 *val)
 	if (INTEL_GEN(dev_priv) < 6)
 		return -ENODEV;
 
-	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
+	*val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.min_freq_softlimit);
 	return 0;
 }
 
@@ -4367,6 +4367,7 @@ static int
 i915_min_freq_set(void *data, u64 val)
 {
 	struct drm_i915_private *dev_priv = data;
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	u32 hw_max, hw_min;
 	int ret;
 
@@ -4384,16 +4385,16 @@ i915_min_freq_set(void *data, u64 val)
 	 */
 	val = intel_freq_opcode(dev_priv, val);
 
-	hw_max = dev_priv->rps.max_freq;
-	hw_min = dev_priv->rps.min_freq;
+	hw_max = rps->max_freq;
+	hw_min = rps->min_freq;
 
 	if (val < hw_min ||
-	    val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
+	    val > hw_max || val > rps->max_freq_softlimit) {
 		mutex_unlock(&dev_priv->pcu_lock);
 		return -EINVAL;
 	}
 
-	dev_priv->rps.min_freq_softlimit = val;
+	rps->min_freq_softlimit = val;
 
 	if (intel_set_rps(dev_priv, val))
 		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
+1 −1
@@ -2502,7 +2502,7 @@ static int intel_runtime_suspend(struct device *kdev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
-	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
+	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rps.enabled && intel_enable_rc6())))
 		return -ENODEV;
 
 	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
+9 −5
@@ -609,7 +609,7 @@ struct drm_i915_file_private {
 
 	struct intel_rps_client {
 		atomic_t boosts;
-	} rps;
+	} rps_client;
 
 	unsigned int bsd_engine;
 
@@ -1317,7 +1317,7 @@ struct intel_rps_ei {
 	u32 media_c0;
 };
 
-struct intel_gen6_power_mgmt {
+struct intel_rps {
 	/*
 	 * work, interrupts_enabled and pm_iir are protected by
 	 * dev_priv->irq_lock
@@ -1358,7 +1358,6 @@ struct intel_gen6_power_mgmt {
 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
 	bool enabled;
-	struct delayed_work autoenable_work;
 	atomic_t num_waiters;
 	atomic_t boosts;
 
@@ -1366,6 +1365,11 @@ struct intel_gen6_power_mgmt {
 	struct intel_rps_ei ei;
 };
 
+struct intel_gen6_power_mgmt {
+	struct intel_rps rps;
+	struct delayed_work autoenable_work;
+};
+
 /* defined intel_pm.c */
 extern spinlock_t mchdev_lock;
 
@@ -2421,8 +2425,8 @@ struct drm_i915_private {
 	 */
 	struct mutex pcu_lock;
 
-	/* gen6+ rps state */
-	struct intel_gen6_power_mgmt rps;
+	/* gen6+ GT PM state */
+	struct intel_gen6_power_mgmt gt_pm;
 
 	/* ilk-only ips/rps state. Everything in here is protected by the global
 	 * mchdev_lock in intel_pm.c */
+11 −10
@@ -358,7 +358,7 @@ static long
 i915_gem_object_wait_fence(struct dma_fence *fence,
 			   unsigned int flags,
 			   long timeout,
-			   struct intel_rps_client *rps)
+			   struct intel_rps_client *rps_client)
 {
 	struct drm_i915_gem_request *rq;
 
@@ -391,11 +391,11 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
 	 * forcing the clocks too high for the whole system, we only allow
 	 * each client to waitboost once in a busy period.
 	 */
-	if (rps) {
+	if (rps_client) {
 		if (INTEL_GEN(rq->i915) >= 6)
-			gen6_rps_boost(rq, rps);
+			gen6_rps_boost(rq, rps_client);
 		else
-			rps = NULL;
+			rps_client = NULL;
 	}
 
 	timeout = i915_wait_request(rq, flags, timeout);
@@ -411,7 +411,7 @@ static long
 i915_gem_object_wait_reservation(struct reservation_object *resv,
 				 unsigned int flags,
 				 long timeout,
-				 struct intel_rps_client *rps)
+				 struct intel_rps_client *rps_client)
 {
 	unsigned int seq = __read_seqcount_begin(&resv->seq);
 	struct dma_fence *excl;
@@ -430,7 +430,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 		for (i = 0; i < count; i++) {
 			timeout = i915_gem_object_wait_fence(shared[i],
 							     flags, timeout,
-							     rps);
+							     rps_client);
 			if (timeout < 0)
 				break;
 
@@ -447,7 +447,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 	}
 
 	if (excl && timeout >= 0) {
-		timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
+		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
+						     rps_client);
 		prune_fences = timeout >= 0;
 	}
 
@@ -543,7 +544,7 @@ int
 i915_gem_object_wait(struct drm_i915_gem_object *obj,
 		     unsigned int flags,
 		     long timeout,
-		     struct intel_rps_client *rps)
+		     struct intel_rps_client *rps_client)
 {
 	might_sleep();
 #if IS_ENABLED(CONFIG_LOCKDEP)
@@ -555,7 +556,7 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
 
 	timeout = i915_gem_object_wait_reservation(obj->resv,
 						   flags, timeout,
-						   rps);
+						   rps_client);
 	return timeout < 0 ? timeout : 0;
 }
 
@@ -563,7 +564,7 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
 {
 	struct drm_i915_file_private *fpriv = file->driver_priv;
 
-	return &fpriv->rps;
+	return &fpriv->rps_client;
 }
 
 static int
+1 −1
@@ -416,7 +416,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 
 	spin_lock_irq(&request->lock);
 	if (request->waitboost)
-		atomic_dec(&request->i915->rps.num_waiters);
+		atomic_dec(&request->i915->gt_pm.rps.num_waiters);
 	dma_fence_signal_locked(&request->fence);
 	spin_unlock_irq(&request->lock);
 