Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4719c01d authored by qctecmdr Service; committed by Gerrit (the code review server)
Browse files

Merge "cpuidle: Fix cpu frequent exits from low power mode"

parents 3f0a7110 ae04811d
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -430,7 +430,6 @@ static void xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */
	 * data back is to call:
	 */
	tick_nohz_idle_enter();
	tick_nohz_idle_stop_tick_protected();

	cpuhp_online_idle(CPUHP_AP_ONLINE_IDLE);
}
+2 −8
Original line number Diff line number Diff line
@@ -264,18 +264,12 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 * @stop_tick: indication on whether or not to stop the tick
 *
 * Returns the index of the idle state.  The return value must not be negative.
 *
 * The memory location pointed to by @stop_tick is expected to be written the
 * 'false' boolean value if the scheduler tick should not be stopped before
 * entering the returned state.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		   bool *stop_tick)
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	return cpuidle_curr_governor->select(drv, dev, stop_tick);
	return cpuidle_curr_governor->select(drv, dev);
}

/**
+1 −2
Original line number Diff line number Diff line
@@ -62,10 +62,9 @@ static inline void ladder_do_selection(struct ladder_device *ldev,
 * ladder_select_state - selects the next state to enter
 * @drv: cpuidle driver
 * @dev: the CPU
 * @dummy: not used
 */
static int ladder_select_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev, bool *dummy)
				struct cpuidle_device *dev)
{
	struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
	struct ladder_device_state *last_state;
+20 −93
Original line number Diff line number Diff line
@@ -123,7 +123,6 @@
struct menu_device {
	int		last_state_idx;
	int             needs_update;
	int             tick_wakeup;

	unsigned int	next_timer_us;
	unsigned int	predicted_us;
@@ -285,10 +284,8 @@ static unsigned int get_typical_interval(struct menu_device *data)
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	struct device *device = get_cpu_device(dev->cpu);
@@ -300,7 +297,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
	unsigned int expected_interval;
	unsigned long nr_iowaiters, cpu_load;
	int resume_latency = dev_pm_qos_raw_read_value(device);
	ktime_t delta_next;

	if (data->needs_update) {
		menu_update(drv, dev);
@@ -312,13 +308,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		latency_req = resume_latency;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0)) {
		*stop_tick = false;
	if (unlikely(latency_req == 0))
		return 0;
	}

	/* determine the expected residency time, round up */
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length());

	get_iowait_load(&nr_iowaiters, &cpu_load);
	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
@@ -357,20 +351,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
	 */
	data->predicted_us = min(data->predicted_us, expected_interval);

	if (tick_nohz_tick_stopped()) {
		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is much higher, because the CPU
		 * may be stuck in a shallow idle state for a long time as a
		 * result of it.  In that case say we might mispredict and try
		 * to force the CPU into a state for which we would have stopped
		 * the tick, unless a timer is going to expire really soon
		 * anyway.
		 */
		if (data->predicted_us < TICK_USEC)
			data->predicted_us = min_t(unsigned int, TICK_USEC,
						   ktime_to_us(delta_next));
	} else {
	/*
	 * Use the performance multiplier and the user-configurable
	 * latency_req to determine the maximum exit latency.
@@ -378,9 +358,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
	interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
	if (latency_req > interactivity_req)
		latency_req = interactivity_req;
	}

	expected_interval = data->predicted_us;
	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
@@ -396,52 +374,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			idx = i; /* first enabled state */
		if (s->target_residency > data->predicted_us)
			break;
		if (s->exit_latency > latency_req) {
			/*
			 * If we break out of the loop for latency reasons, use
			 * the target residency of the selected state as the
			 * expected idle duration so that the tick is retained
			 * as long as that target residency is low enough.
			 */
			expected_interval = drv->states[idx].target_residency;
		if (s->exit_latency > latency_req)
			break;
		}

		idx = i;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	    expected_interval < TICK_USEC) {
		unsigned int delta_next_us = ktime_to_us(delta_next);

		*stop_tick = false;

		if (!tick_nohz_tick_stopped() && idx > 0 &&
		    drv->states[idx].target_residency > delta_next_us) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
			    if (drv->states[i].disabled ||
			        dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency <= delta_next_us)
					break;
			}
		}
	}

	data->last_state_idx = idx;

	return data->last_state_idx;
@@ -461,7 +402,6 @@ static void menu_reflect(struct cpuidle_device *dev, int index)

	data->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
@@ -492,18 +432,6 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion.  Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_us = 9 * MAX_INTERESTING / 10;
	} else {
	/* measured value */
	measured_us = cpuidle_get_last_residency(dev);

@@ -512,7 +440,6 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
		measured_us -= target->exit_latency;
	else
		measured_us /= 2;
	}

	/* Make sure our coefficients do not exceed unity */
	if (measured_us > data->next_timer_us)
+2 −3
Original line number Diff line number Diff line
@@ -596,8 +596,7 @@ static int cpu_power_select(struct cpuidle_device *dev,
	int best_level = 0;
	uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
							dev->cpu);
	ktime_t delta_next;
	s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));
	s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
	uint32_t modified_time_us = 0;
	uint32_t next_event_us = 0;
	int i, idx_restrict;
@@ -1327,7 +1326,7 @@ static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
}

static int lpm_cpuidle_select(struct cpuidle_driver *drv,
		struct cpuidle_device *dev, bool *stop_tick)
		struct cpuidle_device *dev)
{
	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);

Loading