Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7b2a034c authored by Abhijeet Dharmapurikar
Browse files

sched/walt: improve the scheduler



Convert the boolean boost-on-big plumbing to the sched_boost_policy enum:
replace task_boost_on_big_eligible() with task_boost_policy(), which returns
SCHED_BOOST_NONE / SCHED_BOOST_ON_BIG instead of a bool, change the
placement_boost members of find_best_target_env and the sched_task_util
tracepoint from bool to int, and compare against SCHED_BOOST_ON_BIG at the
call sites in task_fits_max(), find_best_target(),
find_energy_efficient_cpu() and _set_preferred_cluster().

Change-Id: Ie31515dc09be0ca33ab9f418017bd7ae362cebb2
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Signed-off-by: Abhijeet Dharmapurikar <adharmap@codeaurora.org>
parent 7bee1e15
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1300,7 +1300,7 @@ TRACE_EVENT(sched_task_util,
		__field(bool, sync			)
		__field(bool, need_idle			)
		__field(int, fastpath			)
		__field(bool, placement_boost		)
		__field(int, placement_boost		)
		__field(int, rtg_cpu			)
		__field(u64, latency			)
	),
+8 −7
Original line number Diff line number Diff line
@@ -7005,7 +7005,8 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
	if (capacity == max_capacity)
		return true;

	if (task_boost_on_big_eligible(p) && is_min_capacity_cpu(cpu))
	if (task_boost_policy(p) == SCHED_BOOST_ON_BIG
			&& is_min_capacity_cpu(cpu))
		return false;

	return task_fits_capacity(p, capacity, cpu);
@@ -7013,7 +7014,7 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)

struct find_best_target_env {
	struct cpumask *rtg_target;
	bool placement_boost;
	int placement_boost;
	bool need_idle;
	int fastpath;
};
@@ -7025,7 +7026,7 @@ static bool is_packing_eligible(struct task_struct *p, int target_cpu,
{
	unsigned long tutil, estimated_capacity;

	if (fbt_env->placement_boost || fbt_env->need_idle)
	if (task_placement_boost_enabled(p) || fbt_env->need_idle)
		return false;

	if (best_idle_cstate == -1)
@@ -7384,14 +7385,14 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
		/*
		 * For placement boost (or otherwise), we start with group
		 * where the task should be placed. When
		 * placement boost is active, and we are not at the highest
		 * boost is active, and we are not at the highest
		 * capacity group reset the target_capacity to keep
		 * traversing to other higher clusters.
		 * If we already are at the highest capacity cluster we skip
		 * going around to the lower capacity cluster if we've found
		 * a cpu.
		 */
		if (fbt_env->placement_boost) {
		if (fbt_env->placement_boost == SCHED_BOOST_ON_BIG) {
			if (capacity_orig_of(group_first_cpu(sg)) <
				capacity_orig_of(group_first_cpu(sg->next)))
				target_capacity = ULONG_MAX;
@@ -7688,7 +7689,7 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
	struct cpumask *rtg_target = find_rtg_target(p);
	struct find_best_target_env fbt_env;
	bool need_idle = wake_to_idle(p);
	bool placement_boost = task_placement_boost_enabled(p);
	int placement_boost = task_boost_policy(p);
	u64 start_t = 0;
	int next_cpu = -1, backup_cpu = -1;

@@ -7789,7 +7790,7 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
			p->state == TASK_WAKING)
		delta = task_util(p);
#endif
	if (use_fbt && (fbt_env.placement_boost || fbt_env.need_idle ||
	if (use_fbt && (task_placement_boost_enabled(p) || fbt_env.need_idle ||
		(rtg_target && (!cpumask_test_cpu(prev_cpu, rtg_target) ||
			cpumask_test_cpu(next_cpu, rtg_target))) ||
		 __cpu_overutilized(prev_cpu, delta) ||
+13 −12
Original line number Diff line number Diff line
@@ -2902,12 +2902,13 @@ static inline bool task_placement_boost_enabled(struct task_struct *p)
	return false;
}

static inline bool task_boost_on_big_eligible(struct task_struct *p)
{
	bool boost_on_big = task_sched_boost(p) &&
				sched_boost_policy() == SCHED_BOOST_ON_BIG;

	if (boost_on_big) {
static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
{
	enum sched_boost_policy policy = task_sched_boost(p) ?
							sched_boost_policy() :
							SCHED_BOOST_NONE;
	if (policy == SCHED_BOOST_ON_BIG) {
		/*
		 * Filter out tasks less than min task util threshold
		 * under conservative boost.
@@ -2915,10 +2916,10 @@ static inline bool task_boost_on_big_eligible(struct task_struct *p)
		if (sysctl_sched_boost == CONSERVATIVE_BOOST &&
				task_util(p) <=
				sysctl_sched_min_task_util_for_boost_colocation)
			boost_on_big = false;
			policy = SCHED_BOOST_NONE;
	}

	return boost_on_big;
	return policy;
}

#else	/* CONFIG_SCHED_WALT */
@@ -2937,11 +2938,6 @@ static inline bool task_placement_boost_enabled(struct task_struct *p)
	return false;
}

static inline bool task_boost_on_big_eligible(struct task_struct *p)
{
	return false;
}

static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }

static inline int sched_boost(void)
@@ -2949,6 +2945,11 @@ static inline int sched_boost(void)
	return 0;
}

/*
 * Stub for builds without CONFIG_SCHED_WALT (this sits in the #else branch
 * of the CONFIG_SCHED_WALT conditional): without WALT there is no sched
 * boost support, so no task ever carries a boost policy.
 */
static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
{
	return SCHED_BOOST_NONE;
}

static inline bool
task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
{
+1 −1
Original line number Diff line number Diff line
@@ -2554,7 +2554,7 @@ static void _set_preferred_cluster(struct related_thread_group *grp)
		return;

	list_for_each_entry(p, &grp->tasks, grp_list) {
		if (task_boost_on_big_eligible(p)) {
		if (task_boost_policy(p) == SCHED_BOOST_ON_BIG) {
			group_boost = true;
			break;
		}