Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3c0be5df authored by Pavankumar Kondeti's avatar Pavankumar Kondeti
Browse files

sched/walt: Improve the scheduler



This change tracks the number of RTG high priority tasks enqueued per CPU
(nr_rtg_high_prio_tasks in walt_sched_stats) and uses that count during task
placement to spread RTG high priority tasks across CPUs so they do not
preempt each other; the count is also exported via the sched_cpu_util
tracepoint.

Change-Id: I4da8fd848f9cd43d510ac2ae63605f051e723775
Signed-off-by: default avatarPavankumar Kondeti <pkondeti@codeaurora.org>
parent 7a8045df
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -618,6 +618,7 @@ struct walt_task_struct {
	int				boost;
	bool				wake_up_idle;
	bool				misfit;
	bool				rtg_high_prio;
	bool				low_latency;
	u64				boost_period;
	u64				boost_expires;
+5 −2
Original line number Diff line number Diff line
@@ -927,6 +927,7 @@ TRACE_EVENT(sched_cpu_util,
		__field(int,		isolated)
		__field(int,		reserved)
		__field(int,		high_irq_load)
		__field(unsigned int,	nr_rtg_high_prio_tasks)
	),

	TP_fast_assign(
@@ -943,14 +944,16 @@ TRACE_EVENT(sched_cpu_util,
		__entry->isolated           = cpu_isolated(cpu);
		__entry->reserved           = is_reserved(cpu);
		__entry->high_irq_load      = sched_cpu_high_irqload(cpu);
		__entry->nr_rtg_high_prio_tasks = walt_nr_rtg_high_prio(cpu);
	),

	TP_printk("cpu=%d nr_running=%d cpu_util=%ld cpu_util_cum=%ld capacity_curr=%u capacity=%u capacity_orig=%u idle_state=%d irqload=%llu online=%u, isolated=%u, reserved=%u, high_irq_load=%u",
	TP_printk("cpu=%d nr_running=%d cpu_util=%ld cpu_util_cum=%ld capacity_curr=%u capacity=%u capacity_orig=%u idle_state=%d irqload=%llu online=%u, isolated=%u, reserved=%u, high_irq_load=%u nr_rtg_hp=%u",
		__entry->cpu, __entry->nr_running, __entry->cpu_util,
		__entry->cpu_util_cum, __entry->capacity_curr,
		__entry->capacity, __entry->capacity_orig,
		__entry->idle_state, __entry->irqload, __entry->online,
		__entry->isolated, __entry->reserved, __entry->high_irq_load)
		__entry->isolated, __entry->reserved, __entry->high_irq_load,
		__entry->nr_rtg_high_prio_tasks)
);

TRACE_EVENT(sched_compute_energy,
+30 −3
Original line number Diff line number Diff line
@@ -3937,6 +3937,12 @@ static inline void walt_adjust_cpus_for_packing(struct task_struct *p,
	if (prefer_spread_on_idle(*best_idle_cpu))
		fbt_env->need_idle |= 2;

	if (task_rtg_high_prio(p) && walt_nr_rtg_high_prio(*target_cpu) > 0) {
		*target_cpu = -1;
		return;
	}


	if (fbt_env->need_idle || task_placement_boost_enabled(p) ||
		fbt_env->boosted || shallowest_idle_cstate <= 0) {
		*target_cpu = -1;
@@ -6549,6 +6555,8 @@ static void walt_find_best_target(struct sched_domain *sd, cpumask_t *cpus,
	int unisolated_candidate = -1;
	int order_index = fbt_env->order_index, end_index = fbt_env->end_index;
	int cluster;
	unsigned int target_nr_rtg_high_prio = UINT_MAX;
	bool rtg_high_prio_task = task_rtg_high_prio(p);

	/* Find start CPU based on boost value */
	start_cpu = fbt_env->start_cpu;
@@ -6696,11 +6704,30 @@ static void walt_find_best_target(struct sched_domain *sd, cpumask_t *cpus,
			if (p->state == TASK_RUNNING)
				continue;

			/*
			 * Try to spread the rtg high prio tasks so that they
			 * don't preempt each other. This is an optimistic
			 * check assuming rtg high prio can actually preempt
			 * the current running task with the given vruntime
			 * boost.
			 */
			if (rtg_high_prio_task)  {
				if (walt_nr_rtg_high_prio(i) > target_nr_rtg_high_prio)
					continue;

				/* Favor CPUs with maximum spare capacity */
				if (walt_nr_rtg_high_prio(i) == target_nr_rtg_high_prio &&
						spare_cap < target_max_spare_cap)
					continue;

			} else {
				/* Favor CPUs with maximum spare capacity */
				if (spare_cap < target_max_spare_cap)
					continue;
			}

			target_max_spare_cap = spare_cap;
			target_nr_rtg_high_prio = walt_nr_rtg_high_prio(i);
			target_cpu = i;
		}

+1 −0
Original line number Diff line number Diff line
@@ -105,6 +105,7 @@ struct walt_sched_stats {
	int nr_big_tasks;
	u64 cumulative_runnable_avg_scaled;
	u64 pred_demands_sum_scaled;
	unsigned int nr_rtg_high_prio_tasks;
};

struct walt_task_group {
+12 −0
Original line number Diff line number Diff line
@@ -78,6 +78,10 @@ static inline void inc_rq_walt_stats(struct rq *rq, struct task_struct *p)
	if (p->wts.misfit)
		rq->wrq.walt_stats.nr_big_tasks++;

	p->wts.rtg_high_prio = task_rtg_high_prio(p);
	if (p->wts.rtg_high_prio)
		rq->wrq.walt_stats.nr_rtg_high_prio_tasks++;

	walt_inc_cumulative_runnable_avg(rq, p);
}

@@ -86,6 +90,9 @@ static inline void dec_rq_walt_stats(struct rq *rq, struct task_struct *p)
	if (p->wts.misfit)
		rq->wrq.walt_stats.nr_big_tasks--;

	if (p->wts.rtg_high_prio)
		rq->wrq.walt_stats.nr_rtg_high_prio_tasks--;

	BUG_ON(rq->wrq.walt_stats.nr_big_tasks < 0);

	walt_dec_cumulative_runnable_avg(rq, p);
@@ -192,6 +199,11 @@ static inline void walt_try_to_wake_up(struct task_struct *p)
	rcu_read_unlock();
}

static inline unsigned int walt_nr_rtg_high_prio(int cpu)
{
	return cpu_rq(cpu)->wrq.walt_stats.nr_rtg_high_prio_tasks;
}

#else /* CONFIG_SCHED_WALT */

static inline void walt_sched_init_rq(struct rq *rq) { }