Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9e293db0 authored by Joonwoo Park's avatar Joonwoo Park Committed by Todd Kjos
Browse files

sched: EAS: upmigrate misfit current task



Upmigrate misfit current task upon scheduler tick with stopper.

We can kick a random (not necessarily a big-CPU) NOHZ idle CPU when a
CPU-bound task is in need of upmigration.  But that is inefficient, as it
requires the following unnecessary wakeups:

  1. Busy little CPU A to kick idle B
  2. B runs idle balancer and enqueue migration/A
  3. B goes idle
  4. A runs migration/A, enqueues busy task on B.
  5. B wakes up again.

This change makes active upmigration more efficient by doing:

  1. Busy little CPU A finds a target CPU B upon tick.
  2. CPU A enqueues migration/A.

Change-Id: Ie865738054ea3296f28e6ba01710635efa7193c0
[joonwoop: The original version had logic to reserve CPU.  The logic is
 omitted in this version.]
Signed-off-by: default avatarJoonwoo Park <joonwoop@codeaurora.org>
Signed-off-by: default avatarVikram Mulukutla <markivx@codeaurora.org>
parent dc626b28
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -3097,6 +3097,9 @@ void scheduler_tick(void)
	trigger_load_balance(rq);
#endif
	rq_last_tick_reset(rq);

	if (curr->sched_class == &fair_sched_class)
		check_for_migration(rq, curr);
}

#ifdef CONFIG_NO_HZ_FULL
+46 −2
Original line number Diff line number Diff line
@@ -6312,7 +6312,9 @@ done:

/*
 * cpu_util_wake: Compute cpu utilization with any contributions from
 * the waking task p removed.
 * the waking task p removed.  check_for_migration() looks for a better CPU of
 * rq->curr. For that case we should return cpu util with contributions from
 * currently running task p removed.
 */
static int cpu_util_wake(int cpu, struct task_struct *p)
{
@@ -6325,7 +6327,8 @@ static int cpu_util_wake(int cpu, struct task_struct *p)
	 * utilization from cpu utilization. Instead just use
	 * cpu_util for this case.
	 */
	if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
	if (!walt_disabled && sysctl_sched_use_walt_cpu_util &&
	    p->state == TASK_WAKING)
		return cpu_util(cpu);
#endif
	/* Task has no contribution or is new */
@@ -9974,6 +9977,47 @@ static void rq_offline_fair(struct rq *rq)
	unthrottle_offline_cfs_rqs(rq);
}

/*
 * kick_active_balance - arm active load balancing on @rq to push @p to
 * @new_cpu.
 *
 * Under rq->lock, if no active balance is already pending on @rq, record
 * @new_cpu as the push target and @p (with a task reference taken) as the
 * task to push, and mark the rq as actively balancing.
 *
 * Returns 1 if this call armed the balance (caller should then dispatch
 * the stopper), 0 if one was already in flight (no state changed).
 *
 * NOTE(review): the reference taken via get_task_struct() is presumably
 * dropped by the stopper side (active_load_balance_cpu_stop) — confirm;
 * a 0 return means no reference was taken.
 */
static inline int
kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
{
	int rc = 0;

	/* Invoke active balance to force migrate currently running task */
	raw_spin_lock(&rq->lock);
	if (!rq->active_balance) {
		/* No balance pending: publish target CPU and pinned task. */
		rq->active_balance = 1;
		rq->push_cpu = new_cpu;
		get_task_struct(p);
		rq->push_task = p;
		rc = 1;
	}
	raw_spin_unlock(&rq->lock);

	return rc;
}

/*
 * check_for_migration - upmigrate a misfit running task from the tick path.
 * @rq: the runqueue being ticked (NOTE(review): presumably the current
 *      CPU's rq, since the stopper is dispatched on task_cpu(p) — confirm
 *      against the scheduler_tick() call site).
 * @p:  the task to consider migrating (the rq's current fair task).
 *
 * If @rq has a misfit task, pick a candidate CPU via the EAS brute-force
 * selector and, when that CPU has strictly higher original capacity than
 * @p's current CPU (i.e. this is an upmigration), arm active balance and
 * kick the per-CPU stopper to push @p there.  This avoids the roundabout
 * NOHZ-idle-kick path described in the commit message.
 */
void check_for_migration(struct rq *rq, struct task_struct *p)
{
	int new_cpu;
	int active_balance;
	int cpu = task_cpu(p);

	if (rq->misfit_task) {
		/* Bail if current isn't runnable or is pinned to one CPU. */
		if (rq->curr->state != TASK_RUNNING ||
		    rq->curr->nr_cpus_allowed == 1)
			return;

		/* Ask EAS for an energy-efficient candidate CPU for @p. */
		new_cpu = select_energy_cpu_brute(p, cpu, 0);
		/* Upmigration only: target must have more orig capacity. */
		if (capacity_orig_of(new_cpu) > capacity_orig_of(cpu)) {
			active_balance = kick_active_balance(rq, p, new_cpu);
			/* Only dispatch the stopper if we armed the balance. */
			if (active_balance)
				stop_one_cpu_nowait(cpu,
						active_load_balance_cpu_stop,
						rq, &rq->active_balance_work);
		}
	}
}

#endif /* CONFIG_SMP */

/*
+2 −0
Original line number Diff line number Diff line
@@ -31,8 +31,10 @@ extern long calc_load_fold_active(struct rq *this_rq);

#ifdef CONFIG_SMP
extern void update_cpu_load_active(struct rq *this_rq);
extern void check_for_migration(struct rq *rq, struct task_struct *p);
#else
static inline void update_cpu_load_active(struct rq *this_rq) { }
static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
#endif

/*