Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1bf676cc authored by Daniele Ceraolo Spurio, committed by Chris Wilson
Browse files

drm/i915: move and rename i915_runtime_pm



Asserts aside, all the code working on this structure is in
intel_runtime_pm.c and uses the intel_ prefix, so move the
structure to intel_runtime_pm.h and adopt the same prefix.

Since all the asserts are now working on the runtime_pm structure,
bring them across as well.

v2: drop unneeded include (Chris), don't rename debugfs, rebase

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Imre Deak <imre.deak@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190613232156.34940-5-daniele.ceraolospurio@intel.com
parent 9102650f
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -2889,7 +2889,7 @@ static int intel_runtime_suspend(struct device *kdev)
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_runtime_pm *rpm = &dev_priv->runtime_pm;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
@@ -2988,7 +2988,7 @@ static int intel_runtime_resume(struct device *kdev)
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_runtime_pm *rpm = &dev_priv->runtime_pm;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
+1 −51
Original line number Diff line number Diff line
@@ -1033,56 +1033,6 @@ struct skl_wm_params {
	u32 dbuf_block_size;
};

/*
 * This struct helps track the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts nor
 * anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable), but
 * it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_enabled variable becomes false exactly after we disable the IRQs
 * and goes back to true exactly before we reenable them. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens.
 *
 * For more, read Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	atomic_t wakeref_count;
	struct device *kdev; /* points to i915->drm.pdev->dev */
	bool available;
	bool suspended;
	bool irqs_enabled;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/*
	 * To aid detection of wakeref leaks and general misuse, we
	 * track all wakeref holders. With manual markup (i.e. returning
	 * a cookie to each rpm_get caller which they then supply to their
	 * paired rpm_put) we can remove corresponding pairs and keep
	 * the array trimmed to active wakerefs.
	 */
	struct intel_runtime_pm_debug {
		spinlock_t lock;

		depot_stack_handle_t last_acquire;
		depot_stack_handle_t last_release;

		depot_stack_handle_t *owners;
		unsigned long count;
	} debug;
#endif
};

enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
@@ -1728,7 +1678,7 @@ struct drm_i915_private {

	struct drm_private_obj bw_obj;

	struct i915_runtime_pm runtime_pm;
	struct intel_runtime_pm runtime_pm;

	struct {
		bool initialized;
+0 −97
Original line number Diff line number Diff line
@@ -1613,101 +1613,4 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
				   unsigned int rotation);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);

/* intel_runtime_pm.c */
/*
 * wakeref_count packs two counters into a single atomic_t: the low half
 * holds the raw-wakeref count and the high half holds the wakelock count.
 */
#define BITS_PER_WAKEREF	\
	BITS_PER_TYPE(struct_member(struct i915_runtime_pm, wakeref_count))
#define INTEL_RPM_WAKELOCK_SHIFT	(BITS_PER_WAKEREF / 2)
#define INTEL_RPM_WAKELOCK_BIAS		(1 << INTEL_RPM_WAKELOCK_SHIFT)
#define INTEL_RPM_RAW_WAKEREF_MASK	(INTEL_RPM_WAKELOCK_BIAS - 1)

/* Return the raw-wakeref count stored in the low half of wakeref_count. */
static inline int
intel_rpm_raw_wakeref_count(int wakeref_count)
{
	return wakeref_count & INTEL_RPM_RAW_WAKEREF_MASK;
}

/* Return the wakelock count stored in the high half of wakeref_count. */
static inline int
intel_rpm_wakelock_count(int wakeref_count)
{
	return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT;
}

/* WARN (once) if the device is accessed while rpm->suspended is set. */
static inline void
assert_rpm_device_not_suspended(struct i915_runtime_pm *rpm)
{
	WARN_ONCE(rpm->suspended,
		  "Device suspended during HW access\n");
}

/*
 * Check an explicit wakeref_count snapshot: the device must not be
 * suspended and the raw-wakeref (low half) count must be non-zero.
 */
static inline void
__assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm, int wakeref_count)
{
	assert_rpm_device_not_suspended(rpm);
	WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count),
		  "RPM raw-wakeref not held\n");
}

/*
 * On top of the raw-wakeref checks, the wakelock (high half) count of
 * an explicit wakeref_count snapshot must also be non-zero.
 */
static inline void
__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm, int wakeref_count)
{
	__assert_rpm_raw_wakeref_held(rpm, wakeref_count);
	WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count),
		  "RPM wakelock ref not held during HW access\n");
}

/* Assert a raw wakeref is held, using the current counter value. */
static inline void
assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm)
{
	__assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
}

/* Assert a wakelock reference is held, using the current counter value. */
static inline void
assert_rpm_wakelock_held(struct i915_runtime_pm *rpm)
{
	__assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
}

/**
 * disable_rpm_wakeref_asserts - disable the RPM assert checks
 * @rpm: the i915_runtime_pm structure
 *
 * This function disables asserts that check if we hold an RPM wakelock
 * reference, while keeping the device-not-suspended checks still enabled.
 * It's meant to be used only in special circumstances where our rule about
 * the wakelock refcount wrt. the device power state doesn't hold. According
 * to this rule at any point where we access the HW or want to keep the HW in
 * an active state we must hold an RPM wakelock reference acquired via one of
 * the intel_runtime_pm_get() helpers. Currently there are a few special spots
 * where this rule doesn't hold: the IRQ and suspend/resume handlers, the
 * forcewake release timer, and the GPU RPS and hangcheck works. All other
 * users should avoid using this function.
 *
 * Any calls to this function must have a symmetric call to
 * enable_rpm_wakeref_asserts().
 */
static inline void
disable_rpm_wakeref_asserts(struct i915_runtime_pm *rpm)
{
	/*
	 * WAKELOCK_BIAS + 1 bumps both the wakelock (high) and raw-wakeref
	 * (low) halves of the counter, so both assert flavours see a
	 * non-zero count while the asserts are disabled.
	 */
	atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}

/**
 * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
 * @rpm: the i915_runtime_pm structure
 *
 * This function re-enables the RPM assert checks after disabling them with
 * disable_rpm_wakeref_asserts. It's meant to be used only in special
 * circumstances otherwise its use should be avoided.
 *
 * Any calls to this function must have a symmetric call to
 * disable_rpm_wakeref_asserts().
 */
static inline void
enable_rpm_wakeref_asserts(struct i915_runtime_pm *rpm)
{
	/* Undo the bias added by disable_rpm_wakeref_asserts(). */
	atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}

#endif /* __INTEL_DRV_H__ */
+21 −21
Original line number Diff line number Diff line
@@ -75,13 +75,13 @@ static void __print_depot_stack(depot_stack_handle_t stack,
	stack_trace_snprint(buf, sz, entries, nr_entries, indent);
}

static void init_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm)
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	spin_lock_init(&rpm->debug.lock);
}

static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm)
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	depot_stack_handle_t stack, *stacks;
	unsigned long flags;
@@ -113,7 +113,7 @@ track_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm)
	return stack;
}

static void untrack_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm,
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     depot_stack_handle_t stack)
{
	unsigned long flags, n;
@@ -233,7 +233,7 @@ dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
}

static noinline void
__intel_wakeref_dec_and_check_tracking(struct i915_runtime_pm *rpm)
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;
@@ -250,7 +250,7 @@ __intel_wakeref_dec_and_check_tracking(struct i915_runtime_pm *rpm)
}

static noinline void
untrack_all_intel_runtime_pm_wakerefs(struct i915_runtime_pm *rpm)
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;
@@ -268,7 +268,7 @@ void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
	struct intel_runtime_pm_debug dbg = {};

	do {
		struct i915_runtime_pm *rpm = &i915->runtime_pm;
		struct intel_runtime_pm *rpm = &i915->runtime_pm;
		unsigned long alloc = dbg.count;
		depot_stack_handle_t *s;

@@ -302,36 +302,36 @@ void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,

#else

static void init_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm)
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}

static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm)
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	return -1;
}

static void untrack_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm,
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     intel_wakeref_t wref)
{
}

static void
__intel_wakeref_dec_and_check_tracking(struct i915_runtime_pm *rpm)
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	atomic_dec(&rpm->wakeref_count);
}

static void
untrack_all_intel_runtime_pm_wakerefs(struct i915_runtime_pm *rpm)
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
}

#endif

static void
intel_runtime_pm_acquire(struct i915_runtime_pm *rpm, bool wakelock)
intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
{
	if (wakelock) {
		atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
@@ -343,7 +343,7 @@ intel_runtime_pm_acquire(struct i915_runtime_pm *rpm, bool wakelock)
}

static void
intel_runtime_pm_release(struct i915_runtime_pm *rpm, int wakelock)
intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
{
	if (wakelock) {
		assert_rpm_wakelock_held(rpm);
@@ -355,7 +355,7 @@ intel_runtime_pm_release(struct i915_runtime_pm *rpm, int wakelock)
	__intel_wakeref_dec_and_check_tracking(rpm);
}

static intel_wakeref_t __intel_runtime_pm_get(struct i915_runtime_pm *rpm,
static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
					      bool wakelock)
{
	int ret;
@@ -424,7 +424,7 @@ intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
 */
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;

	if (IS_ENABLED(CONFIG_PM)) {
		/*
@@ -463,7 +463,7 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;

	assert_rpm_wakelock_held(rpm);
	pm_runtime_get_noresume(rpm->kdev);
@@ -473,7 +473,7 @@ intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
	return track_intel_runtime_pm_wakeref(rpm);
}

static void __intel_runtime_pm_put(struct i915_runtime_pm *rpm,
static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
				   intel_wakeref_t wref,
				   bool wakelock)
{
@@ -547,7 +547,7 @@ void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
 */
void intel_runtime_pm_enable(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct device *kdev = rpm->kdev;

	/*
@@ -589,7 +589,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *i915)

void intel_runtime_pm_disable(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct device *kdev = rpm->kdev;

	/* Transfer rpm ownership back to core */
@@ -604,7 +604,7 @@ void intel_runtime_pm_disable(struct drm_i915_private *i915)

void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	int count = atomic_read(&rpm->wakeref_count);

	WARN(count,
@@ -617,7 +617,7 @@ void intel_runtime_pm_cleanup(struct drm_i915_private *i915)

void intel_runtime_pm_init_early(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct pci_dev *pdev = i915->drm.pdev;
	struct device *kdev = &pdev->dev;

+149 −0
Original line number Diff line number Diff line
@@ -11,6 +11,9 @@
#include "intel_display.h"
#include "intel_wakeref.h"

#include "i915_utils.h"

struct device;
struct drm_i915_private;
struct drm_printer;

@@ -20,6 +23,152 @@ enum i915_drm_suspend_mode {
	I915_DRM_SUSPEND_HIBERNATE,
};

/*
 * This struct helps track the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts nor
 * anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable), but
 * it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_enabled variable becomes false exactly after we disable the IRQs
 * and goes back to true exactly before we reenable them. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens.
 *
 * For more, read Documentation/power/runtime_pm.txt.
 */
struct intel_runtime_pm {
	atomic_t wakeref_count;
	struct device *kdev; /* points to i915->drm.pdev->dev */
	bool available;
	bool suspended;
	bool irqs_enabled;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/*
	 * To aid detection of wakeref leaks and general misuse, we
	 * track all wakeref holders. With manual markup (i.e. returning
	 * a cookie to each rpm_get caller which they then supply to their
	 * paired rpm_put) we can remove corresponding pairs and keep
	 * the array trimmed to active wakerefs.
	 */
	struct intel_runtime_pm_debug {
		spinlock_t lock;

		depot_stack_handle_t last_acquire;
		depot_stack_handle_t last_release;

		depot_stack_handle_t *owners;
		unsigned long count;
	} debug;
#endif
};

/*
 * wakeref_count packs two counters into a single atomic_t: the low half
 * holds the raw-wakeref count and the high half holds the wakelock count.
 */
#define BITS_PER_WAKEREF	\
	BITS_PER_TYPE(struct_member(struct intel_runtime_pm, wakeref_count))
#define INTEL_RPM_WAKELOCK_SHIFT	(BITS_PER_WAKEREF / 2)
#define INTEL_RPM_WAKELOCK_BIAS		(1 << INTEL_RPM_WAKELOCK_SHIFT)
#define INTEL_RPM_RAW_WAKEREF_MASK	(INTEL_RPM_WAKELOCK_BIAS - 1)

/* Return the raw-wakeref count stored in the low half of wakeref_count. */
static inline int
intel_rpm_raw_wakeref_count(int wakeref_count)
{
	return wakeref_count & INTEL_RPM_RAW_WAKEREF_MASK;
}

/* Return the wakelock count stored in the high half of wakeref_count. */
static inline int
intel_rpm_wakelock_count(int wakeref_count)
{
	return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT;
}

/* WARN (once) if the device is accessed while rpm->suspended is set. */
static inline void
assert_rpm_device_not_suspended(struct intel_runtime_pm *rpm)
{
	WARN_ONCE(rpm->suspended,
		  "Device suspended during HW access\n");
}

/*
 * Check an explicit wakeref_count snapshot: the device must not be
 * suspended and the raw-wakeref (low half) count must be non-zero.
 */
static inline void
__assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
	assert_rpm_device_not_suspended(rpm);
	WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count),
		  "RPM raw-wakeref not held\n");
}

/*
 * On top of the raw-wakeref checks, the wakelock (high half) count of
 * an explicit wakeref_count snapshot must also be non-zero.
 */
static inline void
__assert_rpm_wakelock_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
	__assert_rpm_raw_wakeref_held(rpm, wakeref_count);
	WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count),
		  "RPM wakelock ref not held during HW access\n");
}

/* Assert a raw wakeref is held, using the current counter value. */
static inline void
assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm)
{
	__assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
}

/* Assert a wakelock reference is held, using the current counter value. */
static inline void
assert_rpm_wakelock_held(struct intel_runtime_pm *rpm)
{
	__assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
}

/**
 * disable_rpm_wakeref_asserts - disable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function disables asserts that check if we hold an RPM wakelock
 * reference, while keeping the device-not-suspended checks still enabled.
 * It's meant to be used only in special circumstances where our rule about
 * the wakelock refcount wrt. the device power state doesn't hold. According
 * to this rule at any point where we access the HW or want to keep the HW in
 * an active state we must hold an RPM wakelock reference acquired via one of
 * the intel_runtime_pm_get() helpers. Currently there are a few special spots
 * where this rule doesn't hold: the IRQ and suspend/resume handlers, the
 * forcewake release timer, and the GPU RPS and hangcheck works. All other
 * users should avoid using this function.
 *
 * Any calls to this function must have a symmetric call to
 * enable_rpm_wakeref_asserts().
 */
static inline void
disable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
	/*
	 * WAKELOCK_BIAS + 1 bumps both the wakelock (high) and raw-wakeref
	 * (low) halves of the counter, so both assert flavours see a
	 * non-zero count while the asserts are disabled.
	 */
	atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}

/**
 * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function re-enables the RPM assert checks after disabling them with
 * disable_rpm_wakeref_asserts. It's meant to be used only in special
 * circumstances otherwise its use should be avoided.
 *
 * Any calls to this function must have a symmetric call to
 * disable_rpm_wakeref_asserts().
 */
static inline void
enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
	/* Undo the bias added by disable_rpm_wakeref_asserts(). */
	atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}

void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
void intel_runtime_pm_disable(struct drm_i915_private *dev_priv);
Loading