Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9c7d644f authored by Puja Gupta
Browse files

sched: Don't update idle_cpus_mask during core isolation



Remove code to update nohz.idle_cpus_mask from
do_isolation_work_cpu_stop(), instead update find_new_ilb() to exclude
isolated_cpus. This is done to fix the issue where a cpu is not present
in nohz.idle_cpus_mask but NOHZ_TICK_STOPPED is set for it. As a result,
even after the cpu is unisolated, tasks won't see the cpu as idle due to
the inconsistent nohz.idle_cpus_mask.
Also restore nohz_balance_exit_idle() to upstream version.

Change-Id: I126d256446fee541284a39d16c2a7739c2d30414
Signed-off-by: Puja Gupta <pujag@codeaurora.org>
parent d98d9ee8
Loading
Loading
Loading
Loading
+0 −6
Original line number Diff line number Diff line
@@ -5956,12 +5956,6 @@ int do_isolation_work_cpu_stop(void *data)
		set_rq_online(rq);
	raw_spin_unlock(&rq->lock);

	/*
	 * We might have been in tickless state. Clear NOHZ flags to avoid
	 * us being kicked for helping out with balancing
	 */
	nohz_balance_clear_nohz_mask(cpu);

	clear_walt_request(cpu);
	local_irq_enable();
	return 0;
+22 −14
Original line number Diff line number Diff line
@@ -9900,6 +9900,8 @@ static inline int find_new_ilb(int type)
	if (sd) {
		cpumask_and(&cpumask, nohz.idle_cpus_mask,
			    sched_domain_span(sd));
		cpumask_andnot(&cpumask, &cpumask,
			    cpu_isolated_mask);
		ilb = cpumask_first(&cpumask);
	}
	rcu_read_unlock();
@@ -9908,8 +9910,11 @@ static inline int find_new_ilb(int type)
		if (!energy_aware() ||
		    (capacity_orig_of(cpu) ==
		     cpu_rq(cpu)->rd->max_cpu_capacity.val ||
		     cpu_overutilized(cpu)))
			ilb = cpumask_first(nohz.idle_cpus_mask);
		     cpu_overutilized(cpu))) {
			cpumask_andnot(&cpumask, nohz.idle_cpus_mask,
			    cpu_isolated_mask);
			ilb = cpumask_first(&cpumask);
		}
	}

	if (ilb < nr_cpu_ids && idle_cpu(ilb))
@@ -9946,21 +9951,16 @@ static void nohz_balancer_kick(int type)
	return;
}

void nohz_balance_clear_nohz_mask(int cpu)
{
	if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
		atomic_dec(&nohz.nr_cpus);
	}
}

void nohz_balance_exit_idle(unsigned int cpu)
{
	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
		/*
		 * Completely isolated CPUs don't ever set, so we must test.
		 */
		nohz_balance_clear_nohz_mask(cpu);
		if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
			cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
			atomic_dec(&nohz.nr_cpus);
		}
		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
	}
}
@@ -10235,6 +10235,7 @@ static inline bool nohz_kick_needed(struct rq *rq, int *type)
	int nr_busy;
	int cpu = rq->cpu;
	bool kick = false;
	cpumask_t cpumask;

	if (unlikely(rq->idle_balance))
		return false;
@@ -10250,7 +10251,8 @@ static inline bool nohz_kick_needed(struct rq *rq, int *type)
	 * None are in tickless mode and hence no need for NOHZ idle load
	 * balancing.
	 */
	if (likely(!atomic_read(&nohz.nr_cpus)))
	cpumask_andnot(&cpumask, nohz.idle_cpus_mask, cpu_isolated_mask);
	if (cpumask_empty(&cpumask))
		return false;

	if (time_before(now, nohz.next_balance))
@@ -10284,8 +10286,7 @@ static inline bool nohz_kick_needed(struct rq *rq, int *type)
	}

	sd = rcu_dereference(per_cpu(sd_asym, cpu));
	if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
				  sched_domain_span(sd)) < cpu)) {
	if (sd && (cpumask_first_and(&cpumask, sched_domain_span(sd)) < cpu)) {
		kick = true;
		goto unlock;
	}
@@ -10308,6 +10309,13 @@ static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
	enum cpu_idle_type idle = this_rq->idle_balance ?
						CPU_IDLE : CPU_NOT_IDLE;

	/*
	 * Since core isolation doesn't update nohz.idle_cpus_mask, there
	 * is a possibility this nohz kicked cpu could be isolated. Hence
	 * return if the cpu is isolated.
	 */
	if (cpu_isolated(this_rq->cpu))
		return;
	/*
	 * If this cpu has a pending nohz_balance_kick, then do the
	 * balancing on behalf of the other idle cpus whose ticks are
+0 −1
Original line number Diff line number Diff line
@@ -1444,7 +1444,6 @@ extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc);
extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);
extern void nohz_balance_clear_nohz_mask(int cpu);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);