Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1ca80944 authored by Daniel Lezcano, committed by Rafael J. Wysocki
Browse files

POWERPC: pseries: cpuidle: use time keeping flag



The current code computes the idle time but that can be handled
by the cpuidle framework if we enable the .en_core_tk_irqen flag.

Set the flag and remove the code related to the time computation.

Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Signed-off-by: Deepthi Dharwar <deepthi@linux.vnet.ibm.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 561a07ac
Loading
Loading
Loading
Loading
+15 −20
Original line number Diff line number Diff line
@@ -25,6 +25,7 @@
struct cpuidle_driver pseries_idle_driver = {
	.name             = "pseries_idle",
	.owner            = THIS_MODULE,
	.en_core_tk_irqen = 1,
};

#define MAX_IDLE_STATE_COUNT	2
@@ -33,10 +34,8 @@ static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
static struct cpuidle_device __percpu *pseries_cpuidle_devices;
static struct cpuidle_state *cpuidle_state_table;

static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
static inline void idle_loop_prolog(unsigned long *in_purr)
{

	*kt_before = ktime_get();
	*in_purr = mfspr(SPRN_PURR);
	/*
	 * Indicate to the HV that we are idle. Now would be
@@ -45,12 +44,10 @@ static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
	get_lppaca()->idle = 1;
}

static inline  s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before)
static inline void idle_loop_epilog(unsigned long in_purr)
{
	get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->idle = 0;

	return ktime_to_us(ktime_sub(ktime_get(), kt_before));
}

static int snooze_loop(struct cpuidle_device *dev,
@@ -58,10 +55,9 @@ static int snooze_loop(struct cpuidle_device *dev,
			int index)
{
	unsigned long in_purr;
	ktime_t kt_before;
	int cpu = dev->cpu;

	idle_loop_prolog(&in_purr, &kt_before);
	idle_loop_prolog(&in_purr);
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

@@ -75,8 +71,8 @@ static int snooze_loop(struct cpuidle_device *dev,
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();

	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);
	idle_loop_epilog(in_purr);

	return index;
}

@@ -102,9 +98,8 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
				int index)
{
	unsigned long in_purr;
	ktime_t kt_before;

	idle_loop_prolog(&in_purr, &kt_before);
	idle_loop_prolog(&in_purr);
	get_lppaca()->donate_dedicated_cpu = 1;

	ppc64_runlatch_off();
@@ -112,8 +107,9 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
	check_and_cede_processor();

	get_lppaca()->donate_dedicated_cpu = 0;
	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);

	idle_loop_epilog(in_purr);

	return index;
}

@@ -122,9 +118,8 @@ static int shared_cede_loop(struct cpuidle_device *dev,
			int index)
{
	unsigned long in_purr;
	ktime_t kt_before;

	idle_loop_prolog(&in_purr, &kt_before);
	idle_loop_prolog(&in_purr);

	/*
	 * Yield the processor to the hypervisor.  We return if
@@ -135,8 +130,8 @@ static int shared_cede_loop(struct cpuidle_device *dev,
	 */
	check_and_cede_processor();

	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);
	idle_loop_epilog(in_purr);

	return index;
}