Commit 4a3004e5 authored by Rafael J. Wysocki

Merge branch 'pm-cpuidle'

* pm-cpuidle:
  cpuidle: Do not use CPUIDLE_DRIVER_STATE_START in cpuidle.c
  cpuidle: Select a different state on tick_broadcast_enter() failures
  sched / idle: Call default_idle_call() from cpuidle_enter_state()
  sched / idle: Call idle_set_state() from cpuidle_enter_state()
  cpuidle: Fix the kerneldoc comment for cpuidle_enter_state()
  sched / idle: Eliminate the "reflect" check from cpuidle_idle_call()
  cpuidle: Check the sign of index in cpuidle_reflect()
  sched / idle: Move the default idle call code to a separate function
parents e193cd15 7d51d979
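
Taken together, the branch reworks the fallback paths in the idle loop: the old use_default: label in cpuidle_idle_call() becomes the default_idle_call() helper, state entry moves behind the new call_cpuidle() wrapper, and a tick_broadcast_enter() failure now demotes the CPU to the deepest idle state that keeps the tick running instead of aborting outright. A condensed sketch of the resulting control flow (paraphrased from the diffs below, not the literal post-merge code):

	if (cpuidle_not_available(drv, dev)) {
		default_idle_call();			/* arch-default idle */
	} else if (idle_should_freeze()) {
		/* Suspend-to-idle: deepest state, no governor involved. */
		next_state = cpuidle_find_deepest_state(drv, dev);
		call_cpuidle(drv, dev, next_state);
	} else {
		/* Normal idle: the governor selects, then reflects. */
		next_state = cpuidle_select(drv, dev);
		entered_state = call_cpuidle(drv, dev, next_state);
		cpuidle_reflect(dev, entered_state);	/* skipped if < 0 */
	}
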
drivers/cpuidle/cpuidle.c +28 −10
@@ -65,7 +65,7 @@ int cpuidle_play_dead(void)
 		return -ENODEV;
 
 	/* Find lowest-power state that supports long-term idle */
-	for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
+	for (i = drv->state_count - 1; i >= 0; i--)
 		if (drv->states[i].enter_dead)
 			return drv->states[i].enter_dead(dev, i);
 
@@ -73,16 +73,21 @@ int cpuidle_play_dead(void)
 }
 
 static int find_deepest_state(struct cpuidle_driver *drv,
-			      struct cpuidle_device *dev, bool freeze)
+			      struct cpuidle_device *dev,
+			      unsigned int max_latency,
+			      unsigned int forbidden_flags,
+			      bool freeze)
 {
 	unsigned int latency_req = 0;
-	int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;
+	int i, ret = -ENXIO;
 
-	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
+	for (i = 0; i < drv->state_count; i++) {
 		struct cpuidle_state *s = &drv->states[i];
 		struct cpuidle_state_usage *su = &dev->states_usage[i];
 
 		if (s->disabled || su->disable || s->exit_latency <= latency_req
+		    || s->exit_latency > max_latency
+		    || (s->flags & forbidden_flags)
 		    || (freeze && !s->enter_freeze))
 			continue;
 
@@ -100,7 +105,7 @@ static int find_deepest_state(struct cpuidle_driver *drv,
 int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 			       struct cpuidle_device *dev)
 {
-	return find_deepest_state(drv, dev, false);
+	return find_deepest_state(drv, dev, UINT_MAX, 0, false);
 }
 
 static void enter_freeze_proper(struct cpuidle_driver *drv,
@@ -139,7 +144,7 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	 * that interrupts won't be enabled when it exits and allows the tick to
 	 * be frozen safely.
 	 */
-	index = find_deepest_state(drv, dev, true);
+	index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
 	if (index >= 0)
 		enter_freeze_proper(drv, dev, index);
 
@@ -150,7 +155,7 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
  * cpuidle_enter_state - enter the state and update stats
  * @dev: cpuidle device for this cpu
  * @drv: cpuidle driver for this cpu
- * @next_state: index into drv->states of the state to enter
+ * @index: index into the states table in @drv of the state to enter
  */
 int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 			int index)
@@ -167,8 +172,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	 * local timer will be shut down.  If a local timer is used from another
	 * CPU as a broadcast timer, this call may fail if it is not available.
 	 */
-	if (broadcast && tick_broadcast_enter())
-		return -EBUSY;
+	if (broadcast && tick_broadcast_enter()) {
+		index = find_deepest_state(drv, dev, target_state->exit_latency,
+					   CPUIDLE_FLAG_TIMER_STOP, false);
+		if (index < 0) {
+			default_idle_call();
+			return -EBUSY;
+		}
+		target_state = &drv->states[index];
+	}
+
+	/* Take note of the planned idle state. */
+	sched_idle_set_state(target_state);
 
 	trace_cpu_idle_rcuidle(index, dev->cpu);
 	time_start = ktime_get();
@@ -178,6 +193,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	time_end = ktime_get();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
+	/* The cpu is no longer idle or about to enter idle. */
+	sched_idle_set_state(NULL);
+
 	if (broadcast) {
 		if (WARN_ON_ONCE(!irqs_disabled()))
 			local_irq_disable();
@@ -249,7 +267,7 @@ int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
  */
 void cpuidle_reflect(struct cpuidle_device *dev, int index)
 {
-	if (cpuidle_curr_governor->reflect)
+	if (cpuidle_curr_governor->reflect && index >= 0)
 		cpuidle_curr_governor->reflect(dev, index);
 }
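
The sign check added to cpuidle_reflect() matters because state entry can now return a negative error code (for example -EBUSY when the idle task must be rescheduled) instead of a state index, and reflecting on an error would corrupt the governor's statistics. A sketch of the caller's perspective (condensed from the kernel/sched/idle.c changes below, not literal code):

	/* entered_state is either a valid state index or a negative errno. */
	entered_state = call_cpuidle(drv, dev, next_state);
	cpuidle_reflect(dev, entered_state);	/* no-op when entered_state < 0 */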

drivers/cpuidle/governors/menu.c +2 −2
@@ -367,8 +367,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 static void menu_reflect(struct cpuidle_device *dev, int index)
 {
 	struct menu_device *data = this_cpu_ptr(&menu_devices);
+
 	data->last_state_idx = index;
-	if (index >= 0)
-		data->needs_update = 1;
+	data->needs_update = 1;
 }

include/linux/cpuidle.h +4 −0
@@ -200,6 +200,10 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
 	struct cpuidle_device *dev) {return NULL; }
 #endif
 
+/* kernel/sched/idle.c */
+extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+extern void default_idle_call(void);
+
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
 #else
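
These declarations expose two scheduler-side helpers to the cpuidle core, so drivers/cpuidle/cpuidle.c can record the planned state and fall back to the architecture-default idle routine without reaching into scheduler internals. A condensed sketch of how cpuidle_enter_state() uses them after this merge (paraphrased from the first diff above, not literal code):

	if (index < 0) {		/* no usable state after a broadcast failure */
		default_idle_call();
		return -EBUSY;
	}
	sched_idle_set_state(target_state);	/* note the planned idle state */
	entered_state = target_state->enter(dev, drv, index);
	sched_idle_set_state(NULL);		/* the CPU is no longer idle */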
kernel/sched/idle.c +59 −55
@@ -15,6 +15,15 @@
 
 #include "sched.h"
 
+/**
+ * sched_idle_set_state - Record idle state for the current CPU.
+ * @idle_state: State to record.
+ */
+void sched_idle_set_state(struct cpuidle_state *idle_state)
+{
+	idle_set_state(this_rq(), idle_state);
+}
+
 static int __read_mostly cpu_idle_force_poll;
 
 void cpu_idle_poll_ctrl(bool enable)
@@ -67,6 +76,46 @@ void __weak arch_cpu_idle(void)
 	local_irq_enable();
 }
 
+/**
+ * default_idle_call - Default CPU idle routine.
+ *
+ * To use when the cpuidle framework cannot be used.
+ */
+void default_idle_call(void)
+{
+	if (current_clr_polling_and_test())
+		local_irq_enable();
+	else
+		arch_cpu_idle();
+}
+
+static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+		      int next_state)
+{
+	/* Fall back to the default arch idle method on errors. */
+	if (next_state < 0) {
+		default_idle_call();
+		return next_state;
+	}
+
+	/*
+	 * The idle task must be scheduled, it is pointless to go to idle, just
+	 * update no idle residency and return.
+	 */
+	if (current_clr_polling_and_test()) {
+		dev->last_residency = 0;
+		local_irq_enable();
+		return -EBUSY;
+	}
+
+	/*
+	 * Enter the idle state previously returned by the governor decision.
+	 * This function will block until an interrupt occurs and will take
+	 * care of re-enabling the local interrupts
+	 */
+	return cpuidle_enter(drv, dev, next_state);
+}
+
 /**
  * cpuidle_idle_call - the main idle function
  *
@@ -81,7 +130,6 @@ static void cpuidle_idle_call(void)
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int next_state, entered_state;
-	bool reflect;
 
 	/*
 	 * Check if the idle task must be rescheduled. If it is the
@@ -105,8 +153,10 @@ static void cpuidle_idle_call(void)
 	 */
 	rcu_idle_enter();
 
-	if (cpuidle_not_available(drv, dev))
-		goto use_default;
+	if (cpuidle_not_available(drv, dev)) {
+		default_idle_call();
+		goto exit_idle;
+	}
 
 	/*
 	 * Suspend-to-idle ("freeze") is a system state in which all user space
@@ -124,52 +174,19 @@
 			goto exit_idle;
 		}
 
-		reflect = false;
 		next_state = cpuidle_find_deepest_state(drv, dev);
+		call_cpuidle(drv, dev, next_state);
 	} else {
-		reflect = true;
 		/*
 		 * Ask the cpuidle framework to choose a convenient idle state.
 		 */
 		next_state = cpuidle_select(drv, dev);
-	}
-	/* Fall back to the default arch idle method on errors. */
-	if (next_state < 0)
-		goto use_default;
-
-	/*
-	 * The idle task must be scheduled, it is pointless to
-	 * go to idle, just update no idle residency and get
-	 * out of this function
-	 */
-	if (current_clr_polling_and_test()) {
-		dev->last_residency = 0;
-		entered_state = next_state;
-		local_irq_enable();
-		goto exit_idle;
-	}
-
-	/* Take note of the planned idle state. */
-	idle_set_state(this_rq(), &drv->states[next_state]);
-
-	/*
-	 * Enter the idle state previously returned by the governor decision.
-	 * This function will block until an interrupt occurs and will take
-	 * care of re-enabling the local interrupts
-	 */
-	entered_state = cpuidle_enter(drv, dev, next_state);
-
-	/* The cpu is no longer idle or about to enter idle. */
-	idle_set_state(this_rq(), NULL);
-
-	if (entered_state == -EBUSY)
-		goto use_default;
-
-	/*
-	 * Give the governor an opportunity to reflect on the outcome
-	 */
-	if (reflect)
-		cpuidle_reflect(dev, entered_state);
+		entered_state = call_cpuidle(drv, dev, next_state);
+		/*
+		 * Give the governor an opportunity to reflect on the outcome
+		 */
+		cpuidle_reflect(dev, entered_state);
+	}
 
 exit_idle:
 	__current_set_polling();
@@ -182,19 +199,6 @@ static void cpuidle_idle_call(void)
 
 	rcu_idle_exit();
 	start_critical_timings();
-	return;
-
-use_default:
-	/*
-	 * We can't use the cpuidle framework, let's use the default
-	 * idle routine.
-	 */
-	if (current_clr_polling_and_test())
-		local_irq_enable();
-	else
-		arch_cpu_idle();
-
-	goto exit_idle;
 }
 
 DEFINE_PER_CPU(bool, cpu_dead_idle);