
Commit 6e82a3be authored by Ingo Molnar

sched: optimize update_rq_clock() calls in the load-balancer



Optimize update_rq_clock() calls in the load-balancer: call it right
after locking the runqueue(s), so that the pull functions do not have
to call it themselves.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 2daa3577
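
For context, here is a minimal sketch of the convention this change moves to. It is not actual kernel code: balance_one() and pick_pull_candidate() are hypothetical names, while double_lock_balance(), update_rq_clock(), deactivate_task(), set_task_cpu(), activate_task() and the rq/task_struct types are the sched.c ones touched in the hunks below. Whoever takes the runqueue lock refreshes the clock once; the pull path then relies on rq->clock being current.

/*
 * Sketch only (hypothetical caller, real sched.c helpers): the locker
 * refreshes the runqueue clocks, so the pull path does not have to.
 */
static int balance_one(struct rq *this_rq, int this_cpu, struct rq *busiest)
{
	struct task_struct *p;

	/* this_rq is assumed locked; this_rq->clock is already updated */
	double_lock_balance(this_rq, busiest);
	update_rq_clock(busiest);		/* refresh the remote runqueue once */

	p = pick_pull_candidate(busiest);	/* hypothetical helper */
	if (p) {
		deactivate_task(busiest, p, 0);	/* relies on busiest->clock */
		set_task_cpu(p, this_cpu);
		activate_task(this_rq, p, 0);	/* relies on this_rq->clock */
	}

	spin_unlock(&busiest->lock);
	return p != NULL;
}

The hunks below apply this pattern to double_rq_lock(), load_balance_newidle(), active_load_balance() and __migrate_task(), and drop the now-redundant clock updates from pull_task().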
+8 −6
@@ -2017,6 +2017,8 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 			spin_lock(&rq1->lock);
 		}
 	}
+	update_rq_clock(rq1);
+	update_rq_clock(rq2);
 }
 
 /*
@@ -2113,10 +2115,8 @@ void sched_exec(void)
 static void pull_task(struct rq *src_rq, struct task_struct *p,
 		      struct rq *this_rq, int this_cpu)
 {
-	update_rq_clock(src_rq);
 	deactivate_task(src_rq, p, 0);
 	set_task_cpu(p, this_cpu);
-	__update_rq_clock(this_rq);
 	activate_task(this_rq, p, 0);
 	/*
 	 * Note that idle threads have a prio of MAX_PRIO, for this test
@@ -2798,6 +2798,8 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	if (busiest->nr_running > 1) {
 		/* Attempt to move tasks */
 		double_lock_balance(this_rq, busiest);
+		/* this_rq->clock is already updated */
+		update_rq_clock(busiest);
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, CPU_NEWLY_IDLE,
 					&all_pinned);
@@ -2895,6 +2897,8 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 
 	/* move a task from busiest_rq to target_rq */
 	double_lock_balance(busiest_rq, target_rq);
+	update_rq_clock(busiest_rq);
+	update_rq_clock(target_rq);
 
 	/* Search for an sd spanning us and the target CPU. */
 	for_each_domain(target_cpu, sd) {
@@ -4962,13 +4966,11 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 		goto out;
 
 	on_rq = p->se.on_rq;
-	if (on_rq) {
-		update_rq_clock(rq_src);
+	if (on_rq)
 		deactivate_task(rq_src, p, 0);
-	}
+
 	set_task_cpu(p, dest_cpu);
 	if (on_rq) {
-		update_rq_clock(rq_dest);
 		activate_task(rq_dest, p, 0);
 		check_preempt_curr(rq_dest, p);
 	}