
Commit 3c4017c1 authored by Daniel Lezcano, committed by Ingo Molnar

sched: Move rq->idle_stamp up to the core



idle_balance() modifies the rq->idle_stamp field, making this information
shared across core.c and fair.c.

Since the previous patch lets us know whether the cpu is going to idle or not,
let's encapsulate the rq->idle_stamp information in core.c by moving it up to
the caller.

The idle_balance() function returns true if balancing occurred and the cpu
won't be idle, and false if no balancing happened and the cpu is going idle.
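The resulting division of labor is easy to see in isolation: the caller stamps
the runqueue before trying to balance, and clears the stamp only when
idle_balance() reports that the cpu won't go idle after all. Below is a minimal
standalone C sketch of that pattern; the struct rq, rq_clock() and
idle_balance() here are simplified stand-ins for the kernel's versions, not the
real code.

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

/* Minimal stand-in for the kernel's struct rq (not kernel code). */
struct rq {
	unsigned int nr_running;
	unsigned long long idle_stamp;
};

/* Stand-in for rq_clock(): a monotonic nanosecond timestamp. */
static unsigned long long rq_clock(struct rq *rq)
{
	struct timespec ts;

	(void)rq;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Stub: pretend we pulled one task, so the cpu won't go idle. */
static int idle_balance(struct rq *this_rq)
{
	this_rq->nr_running = 1;
	return 1;
}

/* The caller-side pattern the patch moves into __schedule(). */
static void schedule_idle_path(struct rq *rq)
{
	if (rq->nr_running == 0) {
		/*
		 * Stamp _before_ balancing, so the time spent inside
		 * idle_balance() itself is accounted as idle time.
		 */
		rq->idle_stamp = rq_clock(rq);
		if (idle_balance(rq))
			rq->idle_stamp = 0;	/* balancing succeeded: not idle */
	}
}

int main(void)
{
	struct rq rq = { .nr_running = 0, .idle_stamp = 0 };

	schedule_idle_path(&rq);
	printf("idle_stamp = %llu\n", rq.idle_stamp);
	return 0;
}

Compiled and run, this prints idle_stamp = 0, since the stub "pulls" a task;
had idle_balance() returned 0, the stamp would keep the time at which the cpu
started going idle.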

Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: alex.shi@linaro.org
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1389949444-14821-3-git-send-email-daniel.lezcano@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e5fc6611
kernel/sched/core.c +9 −2
@@ -2704,8 +2704,15 @@ static void __sched __schedule(void)
 
 	pre_schedule(rq, prev);
 
-	if (unlikely(!rq->nr_running))
-		idle_balance(rq);
+	if (unlikely(!rq->nr_running)) {
+		/*
+		 * We must set idle_stamp _before_ calling idle_balance(), such
+		 * that we measure the duration of idle_balance() as idle time.
+		 */
+		rq->idle_stamp = rq_clock(rq);
+		if (idle_balance(rq))
+			rq->idle_stamp = 0;
+	}
 
 	put_prev_task(rq, prev);
 	next = pick_next_task(rq);
kernel/sched/fair.c +6 −8
@@ -6531,7 +6531,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-void idle_balance(struct rq *this_rq)
+int idle_balance(struct rq *this_rq)
 {
 	struct sched_domain *sd;
 	int pulled_task = 0;
@@ -6539,10 +6539,8 @@ void idle_balance(struct rq *this_rq)
 	u64 curr_cost = 0;
 	int this_cpu = this_rq->cpu;
 
-	this_rq->idle_stamp = rq_clock(this_rq);
-
 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
-		return;
+		return 0;
 
 	/*
 	 * Drop the rq->lock, but keep IRQ/preempt disabled.
@@ -6580,11 +6578,9 @@ void idle_balance(struct rq *this_rq)
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
 			next_balance = sd->last_balance + interval;
-		if (pulled_task) {
-			this_rq->idle_stamp = 0;
+		if (pulled_task)
 			break;
-		}
 	}
 	rcu_read_unlock();
 
 	raw_spin_lock(&this_rq->lock);
@@ -6594,7 +6590,7 @@ void idle_balance(struct rq *this_rq)
 	 * A task could have be enqueued in the meantime
 	 */
 	if (this_rq->nr_running && !pulled_task)
-		return;
+		return 1;
 
 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
 		/*
@@ -6606,6 +6602,8 @@ void idle_balance(struct rq *this_rq)
 
 	if (curr_cost > this_rq->max_idle_balance_cost)
 		this_rq->max_idle_balance_cost = curr_cost;
+
+	return pulled_task;
 }
 
 /*
kernel/sched/sched.h +1 −1
@@ -1158,7 +1158,7 @@ extern const struct sched_class idle_sched_class;
 extern void update_group_power(struct sched_domain *sd, int cpu);
 
 extern void trigger_load_balance(struct rq *rq);
-extern void idle_balance(struct rq *this_rq);
+extern int idle_balance(struct rq *this_rq);
 
 extern void idle_enter_fair(struct rq *this_rq);
 extern void idle_exit_fair(struct rq *this_rq);