Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 50f30d5a authored by Pavankumar Kondeti
Browse files

sched/fair: Remove the unused code



sysctl_sched_is_big_little is hard-coded to 1 and there is
no provision to change it, so the find_best_target() path
is never exercised. Remove this unused code. Subsequent
patches will make the code guarded by sysctl_sched_is_big_little
work for both big.LITTLE and SMP systems.

Change-Id: I2bd0e121d3c59c58a7c81b2083db6910cce57766
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent cac747e9
Loading
Loading
Loading
Loading
+1 −123
Original line number Diff line number Diff line
@@ -6746,107 +6746,6 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
	return target;
}
 
/*
 * find_best_target - scan every CPU and pick one for waking task @p.
 * @p:           task being placed
 * @boosted:     when true, iterate from the highest-numbered CPUs first
 *               (assumes higher CPU ids are the bigger cores — TODO confirm
 *               against the platform's CPU numbering)
 * @prefer_idle: favor idle CPUs to reduce wakeup latency
 *
 * Candidates are filtered: offline CPUs, CPUs outside @p's affinity mask,
 * CPUs whose original capacity cannot hold @p's boosted utilization, and
 * (under CONFIG_SCHED_WALT) CPUs with high IRQ load are all skipped.
 * Among the remaining CPUs the loop tracks three candidates:
 *  - best_idle_cpu: an idle CPU (the shallowest C-state one when
 *    sysctl_sched_cstate_aware is set),
 *  - target_cpu:    a running CPU with spare current capacity,
 *  - backup_cpu:    the least-capacity CPU with no headroom, as a last resort.
 *
 * Returns the selected CPU id, or -1 if no candidate was found.
 */
static inline int find_best_target(struct task_struct *p, bool boosted, bool prefer_idle)
{
	int iter_cpu;
	int target_cpu = -1;
	int target_util = 0;
	int backup_capacity = 0;
	int best_idle_cpu = -1;
	int best_idle_cstate = INT_MAX;
	int backup_cpu = -1;
	unsigned long task_util_boosted, new_util;

	task_util_boosted = boosted_task_util(p);
	for (iter_cpu = 0; iter_cpu < NR_CPUS; iter_cpu++) {
		int cur_capacity;
		struct rq *rq;
		int idle_idx;

		/*
		 * Iterate from higher cpus for boosted tasks: walk the CPU
		 * ids in reverse so (presumably bigger) high-numbered cores
		 * are considered first.
		 */
		int i = boosted ? NR_CPUS-iter_cpu-1 : iter_cpu;

		/* Only consider online CPUs the task is allowed to run on. */
		if (!cpu_online(i) || !cpumask_test_cpu(i, tsk_cpus_allowed(p)))
			continue;

		/*
		 * p's blocked utilization is still accounted for on prev_cpu
		 * so prev_cpu will receive a negative bias due to the double
		 * accounting. However, the blocked utilization may be zero.
		 */
		new_util = cpu_util(i) + task_util_boosted;

		/*
		 * Ensure minimum capacity to grant the required boost.
		 * The target CPU can be already at a capacity level higher
		 * than the one required to boost the task.
		 */
		if (new_util > capacity_orig_of(i))
			continue;

#ifdef CONFIG_SCHED_WALT
		/* WALT only: avoid CPUs spending significant time in IRQs. */
		if (sched_cpu_high_irqload(i))
			continue;
#endif
		/*
		 * Unconditionally favoring tasks that prefer idle cpus to
		 * improve latency. The first suitable idle CPU found is kept.
		 */
		if (idle_cpu(i) && prefer_idle) {
			if (best_idle_cpu < 0)
				best_idle_cpu = i;
			continue;
		}

		cur_capacity = capacity_curr_of(i);
		rq = cpu_rq(i);
		idle_idx = idle_get_state_idx(rq);

		/* CPU has headroom at its current capacity for @p. */
		if (new_util < cur_capacity) {
			if (cpu_rq(i)->nr_running) {
				if(prefer_idle) {
					/*
					 * NOTE(review): comment/code mismatch
					 * in the original — this condition
					 * keeps the CPU with the *highest*
					 * new_util seen so far, although it
					 * was labeled "lowest utilization".
					 * Confirm intended behavior before
					 * reusing. Also, assigning the
					 * unsigned long new_util into the
					 * int target_util narrows the value.
					 */
					if (target_util == 0 ||
						target_util < new_util) {
						target_cpu = i;
						target_util = new_util;
					}
				} else {
					/*
					 * NOTE(review): likewise inverted —
					 * this condition keeps the CPU with
					 * the *lowest* new_util, although it
					 * was labeled "highest utilization".
					 */
					if (target_util == 0 ||
						target_util > new_util) {
						target_cpu = i;
						target_util = new_util;
					}
				}
			} else if (!prefer_idle) {
				/*
				 * Idle CPU with headroom: remember it,
				 * preferring a shallower C-state (smaller
				 * idle_idx) when cstate-aware selection
				 * is enabled.
				 */
				if (best_idle_cpu < 0 ||
					(sysctl_sched_cstate_aware &&
						best_idle_cstate > idle_idx)) {
					best_idle_cstate = idle_idx;
					best_idle_cpu = i;
				}
			}
		} else if (backup_capacity == 0 ||
				backup_capacity > cur_capacity) {
			/*
			 * No headroom on this CPU: keep the least-capacity
			 * such CPU as a last-resort backup.
			 */
			backup_capacity = cur_capacity;
			backup_cpu = i;
		}
	}

	/*
	 * Selection priority: idle CPU when idleness is preferred, then the
	 * busy target found above, then any idle CPU, then the backup.
	 * Falls through to -1 (backup_cpu's initial value) if nothing fit.
	 */
	if (prefer_idle && best_idle_cpu >= 0)
		target_cpu = best_idle_cpu;
	else if (target_cpu < 0)
		target_cpu = best_idle_cpu >= 0 ? best_idle_cpu : backup_cpu;

	return target_cpu;
}

/*
 * Should task be woken to any available idle cpu?
 *
@@ -6925,7 +6824,7 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
	struct sched_domain *sd;
	struct sched_group *sg, *sg_target;
	int target_max_cap = INT_MAX;
	int target_cpu, targeted_cpus = 0;
	int target_cpu = -1, targeted_cpus = 0;
	unsigned long task_util_boosted = 0, curr_util = 0;
	long new_util, new_util_cum;
	int i;
@@ -7033,8 +6932,6 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
			}
		} while (sg = sg->next, sg != sd->groups);

		target_cpu = -1;

		cpumask_copy(&search_cpus, tsk_cpus_allowed(p));
		cpumask_and(&search_cpus, &search_cpus,
			    sched_group_cpus(sg_target));
@@ -7203,25 +7100,6 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
			else
				target_cpu = task_cpu(p);
		}
	} else {
		/*
		 * Find a cpu with sufficient capacity
		 */
#ifdef CONFIG_CGROUP_SCHEDTUNE
		bool boosted = schedtune_task_boost(p) > 0;
		bool prefer_idle = schedtune_prefer_idle(p) > 0;
#else
		bool boosted = 0;
		bool prefer_idle = 0;
#endif
		int tmp_target = find_best_target(p, boosted, prefer_idle);

		target_cpu = task_cpu(p);
		if (tmp_target >= 0) {
			target_cpu = tmp_target;
			if ((boosted || prefer_idle) && idle_cpu(target_cpu))
				return target_cpu;
		}
	}

	if (target_cpu != task_cpu(p) && !avoid_prev_cpu &&