Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a75f1d1a authored by Blagovest Kolenichev
Browse files

Revert "ANDROID: sched/fair: prevent possible infinite loop in sched_group_energy"



This reverts commit 727eafb4.

This is a preparation change for merging android-4.14 commit
e709f59a into the msm-4.14 branch.
The reverted change has already been committed as:

536e7d5d sched/fair: prevent possible infinite loop in compute_energy()

Change-Id: I1f44e4b332b973f71d8e5945b7557b8c2e98030f
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
parent 7d77e18d
Loading
Loading
Loading
Loading
+2 −30
Original line number Diff line number Diff line
@@ -6238,21 +6238,11 @@ static int compute_energy(struct energy_env *eenv)
	int cpu;
	struct cpumask visit_cpus;
	struct sched_group *sg;
	int cpu_count;

	WARN_ON(!eenv->sg_top->sge);

	cpumask_copy(&visit_cpus, sched_group_span(eenv->sg_top));
	/* If a cpu is hotplugged in while we are in this function, it does
	 * not appear in the existing visit_cpus mask which came from the
	 * sched_group pointer of the sched_domain pointed at by sd_ea for
	 * either the prev or next cpu and was dereferenced in
	 * select_energy_cpu_idx.
	 * Since we will dereference sd_scs later as we iterate through the
	 * CPUs we expect to visit, new CPUs can be present which are not in
	 * the visit_cpus mask. Guard this with cpu_count.
	 */
	cpu_count = cpumask_weight(&visit_cpus);

	while (!cpumask_empty(&visit_cpus)) {
		struct sched_group *sg_shared_cap = NULL;

@@ -6261,8 +6251,6 @@ static int compute_energy(struct energy_env *eenv)
		/*
		 * Is the group utilization affected by cpus outside this
		 * sched_group?
		 * This sd may have groups with cpus which were not present
		 * when we took visit_cpus.
		 */
		sd = rcu_dereference(per_cpu(sd_scs, cpu));
		if (sd && sd->parent)
@@ -6288,24 +6276,8 @@ static int compute_energy(struct energy_env *eenv)
				calc_sg_energy(eenv);

				/* remove CPUs we have just visited */
				if (!sd->child) {
					/*
					 * cpu_count here is the number of
					 * cpus we expect to visit in this
					 * calculation. If we race against
					 * hotplug, we can have extra cpus
					 * added to the groups we are
					 * iterating which do not appear in
					 * the visit_cpus mask. In that case
					 * we are not able to calculate energy
					 * without restarting so we will bail
					 * out and use prev_cpu this time.
					 */
					if (!cpu_count)
						return -EINVAL;
				if (!sd->child)
					cpumask_xor(&visit_cpus, &visit_cpus, sched_group_span(sg));
					cpu_count--;
				}

				if (cpumask_equal(sched_group_span(sg), sched_group_span(eenv->sg_top)))
					goto next_cpu;