Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 09e0eb02 authored by Pavankumar Kondeti's avatar Pavankumar Kondeti
Browse files

sched: qhmp: consider only the active CPUs in select_best_cpu()



A CPU is marked inactive before it is marked offline during the
hotplug. Placing a task on an inactive but online CPU has the
following problems.

(1) It leads to a suboptimal migration to a fallback CPU later.

(2) unthrottle_offline_cfs_rqs() iterates over rq->leaf_cfs_rq_list
during hotplug to make sure that cfs_rqs are not throttled after
a CPU is marked inactive. If we place a task on an inactive CPU
after this phase, there is a possibility of adding a cfs_rq that
was not present in rq->leaf_cfs_rq_list earlier. If this cfs_rq
is throttled now, the enqueued tasks are not visible during the
task migration. These tasks are left on an offline CPU. When the
runtime is refilled later, we end up sending an IPI to an offline
CPU.

Change-Id: I6c61ad4f534cc55407e8b64a7b429edd9cc9c3fc
Signed-off-by: default avatarPavankumar Kondeti <pkondeti@codeaurora.org>
parent 2ff63559
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -3056,7 +3056,7 @@ static int best_small_task_cpu(struct task_struct *p, int sync)

	hmp_capable = !cpumask_equal(&mpc_mask, cpu_possible_mask);

	cpumask_and(&search_cpu, tsk_cpus_allowed(p), cpu_online_mask);
	cpumask_and(&search_cpu, tsk_cpus_allowed(p), cpu_active_mask);
	if (unlikely(!cpumask_test_cpu(i, &search_cpu))) {
		i = cpumask_first(&search_cpu);
		if (i >= nr_cpu_ids)
@@ -3121,7 +3121,7 @@ static int best_small_task_cpu(struct task_struct *p, int sync)
		return min_cstate_cpu;

	if (!sysctl_sched_restrict_tasks_spread) {
		cpumask_and(&search_cpu, tsk_cpus_allowed(p), cpu_online_mask);
		cpumask_and(&search_cpu, tsk_cpus_allowed(p), cpu_active_mask);
		cpumask_andnot(&search_cpu, &search_cpu, &fb_search_cpu);
		for_each_cpu(i, &search_cpu) {
			rq = cpu_rq(i);
@@ -3244,7 +3244,7 @@ static int select_packing_target(struct task_struct *p, int best_cpu)
	if (rq->max_freq <= rq->mostly_idle_freq)
		return best_cpu;

	cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
	cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_active_mask);
	cpumask_and(&search_cpus, &search_cpus, &rq->freq_domain_cpumask);

	/* Pick the first lowest power cpu as target */
@@ -3316,7 +3316,7 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
	}

	trq = task_rq(p);
	cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
	cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_active_mask);
	for_each_cpu(i, &search_cpus) {
		struct rq *rq = cpu_rq(i);

@@ -4030,7 +4030,7 @@ static int lower_power_cpu_available(struct task_struct *p, int cpu)
	 * This function should be called only when task 'p' fits in the current
	 * CPU which can be ensured by task_will_fit() prior to this.
	 */
	cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
	cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_active_mask);
	cpumask_and(&search_cpus, &search_cpus, &rq->freq_domain_cpumask);
	cpumask_clear_cpu(lowest_power_cpu, &search_cpus);