
Commit a2bd9202 authored by venkatesh.pallipadi@intel.com, committed by Andi Kleen

cpuidle: Do not use poll_idle unless user asks for it



poll_idle was added to CPUIDLE as a low-latency idle handler, to be used
when the user wants CPUs not to enter any idle state at all. It was meant
to be a run-time idle=poll option for the user. However, it was also being
used in the default case by the normal menu and ladder governors, with no
special user setting (reported by Linus Torvalds).

The change below ensures that poll_idle will not be used unless the user
explicitly asks the pm_qos infrastructure for a zero latency requirement.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
parent b635acec
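
For context, the "zero latency requirement" referenced above can be requested from userspace through the pm_qos /dev/cpu_dma_latency device: open it, write the desired bound as a binary 32-bit value in microseconds, and keep the file descriptor open for as long as the constraint should hold. The following standalone sketch is not part of this commit; it only illustrates that interface, assuming the pm_qos misc-device ABI of this kernel era:

/* Hypothetical userspace example: request a 0us CPU DMA latency bound.
 * The request stays active until the file descriptor is closed. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int32_t latency_us = 0;	/* zero latency: governors then select state 0 (poll_idle) */
	int fd = open("/dev/cpu_dma_latency", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/cpu_dma_latency");
		return 1;
	}
	if (write(fd, &latency_us, sizeof(latency_us)) != (ssize_t)sizeof(latency_us)) {
		perror("write");
		close(fd);
		return 1;
	}
	pause();	/* keep the fd open to hold the latency requirement */
	close(fd);
	return 0;
}

While such a program keeps the device open, both governors take the new early-exit path below and select state 0; closing the file descriptor drops the requirement again.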
drivers/cpuidle/governors/ladder.c +10 −4
@@ -67,10 +67,17 @@ static int ladder_select_state(struct cpuidle_device *dev)
 	struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
 	struct ladder_device_state *last_state;
 	int last_residency, last_idx = ldev->last_state_idx;
+	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
 
 	if (unlikely(!ldev))
 		return 0;
 
+	/* Special case when user has set very strict latency requirement */
+	if (unlikely(latency_req == 0)) {
+		ladder_do_selection(ldev, last_idx, 0);
+		return 0;
+	}
+
 	last_state = &ldev->states[last_idx];
 
 	if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
@@ -81,8 +88,7 @@ static int ladder_select_state(struct cpuidle_device *dev)
 	/* consider promotion */
 	if (last_idx < dev->state_count - 1 &&
 	    last_residency > last_state->threshold.promotion_time &&
-	    dev->states[last_idx + 1].exit_latency <=
-			pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
+	    dev->states[last_idx + 1].exit_latency <= latency_req) {
 		last_state->stats.promotion_count++;
 		last_state->stats.demotion_count = 0;
 		if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -92,7 +98,7 @@ static int ladder_select_state(struct cpuidle_device *dev)
 	}
 
 	/* consider demotion */
-	if (last_idx > 0 &&
+	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
 	    last_residency < last_state->threshold.demotion_time) {
 		last_state->stats.demotion_count++;
 		last_state->stats.promotion_count = 0;
@@ -117,7 +123,7 @@ static int ladder_enable_device(struct cpuidle_device *dev)
 	struct ladder_device_state *lstate;
 	struct cpuidle_state *state;
 
-	ldev->last_state_idx = 0;
+	ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
 
 	for (i = 0; i < dev->state_count; i++) {
 		state = &dev->states[i];
drivers/cpuidle/governors/menu.c +9 −2
@@ -34,21 +34,28 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
 static int menu_select(struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
+	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
 	int i;
 
+	/* Special case when user has set very strict latency requirement */
+	if (unlikely(latency_req == 0)) {
+		data->last_state_idx = 0;
+		return 0;
+	}
+
 	/* determine the expected residency time */
 	data->expected_us =
 		(u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
 
 	/* find the deepest idle state that satisfies our constraints */
-	for (i = 1; i < dev->state_count; i++) {
+	for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
 		struct cpuidle_state *s = &dev->states[i];
 
 		if (s->target_residency > data->expected_us)
 			break;
 		if (s->target_residency > data->predicted_us)
 			break;
-		if (s->exit_latency > pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY))
+		if (s->exit_latency > latency_req)
 			break;
 	}