
Commit 291a2ce6 authored by Pavankumar Kondeti

sched/walt: Improve the scheduler



Track the number of RTG (related thread group) high-prio tasks on each
runqueue and use it during task placement: skip packing an RTG high-prio
task onto a CPU that already runs one, and in find_best_target() prefer
the CPU with the fewest such tasks, breaking ties on spare capacity, so
that RTG high-prio tasks are spread out and do not preempt each other.
Also report the per-CPU count in the sched_cpu_util tracepoint.

Change-Id: I4da8fd848f9cd43d510ac2ae63605f051e723775
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 67e9f6f8
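For orientation before the diffs: the change keeps a per-runqueue count of RTG high-prio tasks and consults it at placement time. Below is a minimal userspace sketch of that accounting, assuming simplified stand-ins for rq, walt_sched_stats, task_struct, and the task_rtg_high_prio() predicate; it mirrors the shape of the kernel code but is illustrative, not the actual definitions.

/*
 * Minimal userspace model of the accounting this patch adds.
 * All struct layouts and the predicate are simplified assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

struct walt_sched_stats {
	unsigned int nr_rtg_high_prio_tasks;
};

struct rq {
	struct walt_sched_stats walt_stats;
};

struct task_struct {
	bool rtg_high_prio;	/* snapshot taken at enqueue time */
	bool in_rtg;		/* hypothetical inputs to the predicate */
	bool high_prio;
};

/* stand-in for the kernel's task_rtg_high_prio() predicate */
static bool task_rtg_high_prio(struct task_struct *p)
{
	return p->in_rtg && p->high_prio;
}

/* mirrors inc_rq_walt_stats(): snapshot the predicate, then count */
static void inc_rq_walt_stats(struct rq *rq, struct task_struct *p)
{
	p->rtg_high_prio = task_rtg_high_prio(p);
	if (p->rtg_high_prio)
		rq->walt_stats.nr_rtg_high_prio_tasks++;
}

/* mirrors dec_rq_walt_stats(): use the snapshot, not the live predicate */
static void dec_rq_walt_stats(struct rq *rq, struct task_struct *p)
{
	if (p->rtg_high_prio)
		rq->walt_stats.nr_rtg_high_prio_tasks--;
}

int main(void)
{
	struct rq rq = { { 0 } };
	struct task_struct p = { .in_rtg = true, .high_prio = true };

	inc_rq_walt_stats(&rq, &p);
	p.high_prio = false;		/* task demotes while enqueued */
	dec_rq_walt_stats(&rq, &p);	/* snapshot keeps the counter balanced */
	printf("nr_rtg_high_prio_tasks = %u\n",
	       rq.walt_stats.nr_rtg_high_prio_tasks);	/* prints 0 */
	return 0;
}

Compile with cc and run; it prints 0, showing why the dequeue path uses the cached flag rather than re-evaluating the predicate.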
+1 −0
@@ -878,6 +878,7 @@ struct task_struct {
	bool misfit;
	u32 unfilter;
	bool low_latency;
+	bool rtg_high_prio;
#endif

#ifdef CONFIG_CGROUP_SCHED
+5 −2
@@ -1145,6 +1145,7 @@ TRACE_EVENT(sched_cpu_util,
		__field(int,		isolated)
		__field(int,		reserved)
		__field(int,		high_irq_load)
+		__field(unsigned int,	nr_rtg_high_prio_tasks)
	),

	TP_fast_assign(
@@ -1161,14 +1162,16 @@ TRACE_EVENT(sched_cpu_util,
		__entry->isolated           = cpu_isolated(cpu);
		__entry->reserved           = is_reserved(cpu);
		__entry->high_irq_load      = sched_cpu_high_irqload(cpu);
+		__entry->nr_rtg_high_prio_tasks = walt_nr_rtg_high_prio(cpu);
	),

-	TP_printk("cpu=%d nr_running=%d cpu_util=%ld cpu_util_cum=%ld capacity_curr=%u capacity=%u capacity_orig=%u idle_state=%d irqload=%llu online=%u, isolated=%u, reserved=%u, high_irq_load=%u",
+	TP_printk("cpu=%d nr_running=%d cpu_util=%ld cpu_util_cum=%ld capacity_curr=%u capacity=%u capacity_orig=%u idle_state=%d irqload=%llu online=%u, isolated=%u, reserved=%u, high_irq_load=%u nr_rtg_hp=%u",
		__entry->cpu, __entry->nr_running, __entry->cpu_util,
		__entry->cpu_util_cum, __entry->capacity_curr,
		__entry->capacity, __entry->capacity_orig,
		__entry->idle_state, __entry->irqload, __entry->online,
-		__entry->isolated, __entry->reserved, __entry->high_irq_load)
+		__entry->isolated, __entry->reserved, __entry->high_irq_load,
+		__entry->nr_rtg_high_prio_tasks)
);

TRACE_EVENT(sched_compute_energy,
+29 −3
@@ -4008,6 +4008,11 @@ static inline void adjust_cpus_for_packing(struct task_struct *p,
	if (prefer_spread_on_idle(*best_idle_cpu))
		fbt_env->need_idle |= 2;

+	if (task_rtg_high_prio(p) && walt_nr_rtg_high_prio(*target_cpu) > 0) {
+		*target_cpu = -1;
+		return;
+	}
+
	if (fbt_env->need_idle || task_placement_boost_enabled(p) || boosted ||
		shallowest_idle_cstate <= 0) {
		*target_cpu = -1;
@@ -6936,6 +6941,8 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
	int prev_cpu = task_cpu(p);
	bool next_group_higher_cap = false;
	int isolated_candidate = -1;
+	unsigned int target_nr_rtg_high_prio = UINT_MAX;
+	bool rtg_high_prio_task = task_rtg_high_prio(p);

	/*
	 * In most cases, target_capacity tracks capacity_orig of the most
@@ -7237,12 +7244,31 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
			 * capacity.
			 */

-			/* Favor CPUs with maximum spare capacity */
-			if (spare_cap < target_max_spare_cap)
-				continue;
+			/*
+			 * Try to spread the rtg high prio tasks so that they
+			 * don't preempt each other. This is an optimistic
+			 * check assuming rtg high prio can actually preempt
+			 * the current running task with the given vruntime
+			 * boost.
+			 */
+			if (rtg_high_prio_task) {
+				if (walt_nr_rtg_high_prio(i) > target_nr_rtg_high_prio)
+					continue;
+
+				/* Favor CPUs with maximum spare capacity */
+				if (walt_nr_rtg_high_prio(i) == target_nr_rtg_high_prio &&
+						spare_cap < target_max_spare_cap)
+					continue;
+
+			} else {
+				/* Favor CPUs with maximum spare capacity */
+				if (spare_cap < target_max_spare_cap)
+					continue;
+			}

			target_max_spare_cap = spare_cap;
			target_capacity = capacity_orig;
+			target_nr_rtg_high_prio = walt_nr_rtg_high_prio(i);
			target_cpu = i;
		}
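The selection above applies a two-level key for RTG high-prio tasks: fewest RTG high-prio tasks already on the CPU first, maximum spare capacity as the tie-break. Here is a hedged stand-alone restatement, ignoring the idle-CPU and capacity_orig tiering the real find_best_target() also applies; pick_cpu_for_rtg_high_prio() and its array inputs are hypothetical stand-ins for walt_nr_rtg_high_prio(i) and the computed spare capacity.

#include <limits.h>
#include <stdio.h>

#define NR_CPUS 4

/* hypothetical restatement of the two-level comparison above */
static int pick_cpu_for_rtg_high_prio(const unsigned int nr_rtg_hp[],
				      const long spare_cap[])
{
	unsigned int best_nr = UINT_MAX;	/* as target_nr_rtg_high_prio */
	long best_spare = -1;
	int best_cpu = -1;

	for (int i = 0; i < NR_CPUS; i++) {
		/* primary key: fewest RTG high-prio tasks already queued */
		if (nr_rtg_hp[i] > best_nr)
			continue;
		/* tie-break: maximum spare capacity */
		if (nr_rtg_hp[i] == best_nr && spare_cap[i] < best_spare)
			continue;
		best_nr = nr_rtg_hp[i];
		best_spare = spare_cap[i];
		best_cpu = i;
	}
	return best_cpu;
}

int main(void)
{
	unsigned int nr_rtg_hp[NR_CPUS] = { 1, 0, 0, 2 };
	long spare_cap[NR_CPUS] = { 300, 100, 250, 500 };

	/* CPUs 1 and 2 tie on the count; CPU 2 wins on spare capacity */
	printf("best cpu = %d\n",
	       pick_cpu_for_rtg_high_prio(nr_rtg_hp, spare_cap));
	return 0;
}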

+1 −0
@@ -105,6 +105,7 @@ struct walt_sched_stats {
	int nr_big_tasks;
	u64 cumulative_runnable_avg_scaled;
	u64 pred_demands_sum_scaled;
+	unsigned int nr_rtg_high_prio_tasks;
};

struct group_cpu_time {
+7 −0
@@ -214,12 +214,19 @@ void inc_rq_walt_stats(struct rq *rq, struct task_struct *p)
{
	inc_nr_big_task(&rq->walt_stats, p);
	walt_inc_cumulative_runnable_avg(rq, p);
+
+	p->rtg_high_prio = task_rtg_high_prio(p);
+	if (p->rtg_high_prio)
+		rq->walt_stats.nr_rtg_high_prio_tasks++;
+
}

void dec_rq_walt_stats(struct rq *rq, struct task_struct *p)
{
	dec_nr_big_task(&rq->walt_stats, p);
	walt_dec_cumulative_runnable_avg(rq, p);
+	if (p->rtg_high_prio)
+		rq->walt_stats.nr_rtg_high_prio_tasks--;
}

void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
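Note the asymmetry in the pair above: inc_rq_walt_stats() re-evaluates task_rtg_high_prio(p) and caches the result in p->rtg_high_prio, while dec_rq_walt_stats() consults only the cached flag. If the task's priority or group membership changes while it is enqueued, the decrement still matches the earlier increment, so nr_rtg_high_prio_tasks stays balanced; the userspace sketch near the top of this page demonstrates exactly that case.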