Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6b3fd221 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched/walt: Improve the scheduler"

parents 867d35c7 291a2ce6
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -878,6 +878,7 @@ struct task_struct {
	bool misfit;
	u32 unfilter;
	bool low_latency;
	bool rtg_high_prio;
#endif

#ifdef CONFIG_CGROUP_SCHED
+1 −0
Original line number Diff line number Diff line
@@ -57,6 +57,7 @@ extern unsigned int sysctl_sched_window_stats_policy;
extern unsigned int sysctl_sched_ravg_window_nr_ticks;
extern unsigned int sysctl_sched_dynamic_ravg_window_enable;
extern unsigned int sysctl_sched_prefer_spread;
extern unsigned int sysctl_walt_rtg_cfs_boost_prio;

extern int
walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
+5 −2
Original line number Diff line number Diff line
@@ -1145,6 +1145,7 @@ TRACE_EVENT(sched_cpu_util,
		__field(int,		isolated)
		__field(int,		reserved)
		__field(int,		high_irq_load)
		__field(unsigned int,	nr_rtg_high_prio_tasks)
	),

	TP_fast_assign(
@@ -1161,14 +1162,16 @@ TRACE_EVENT(sched_cpu_util,
		__entry->isolated           = cpu_isolated(cpu);
		__entry->reserved           = is_reserved(cpu);
		__entry->high_irq_load      = sched_cpu_high_irqload(cpu);
		__entry->nr_rtg_high_prio_tasks = walt_nr_rtg_high_prio(cpu);
	),

	TP_printk("cpu=%d nr_running=%d cpu_util=%ld cpu_util_cum=%ld capacity_curr=%u capacity=%u capacity_orig=%u idle_state=%d irqload=%llu online=%u, isolated=%u, reserved=%u, high_irq_load=%u",
	TP_printk("cpu=%d nr_running=%d cpu_util=%ld cpu_util_cum=%ld capacity_curr=%u capacity=%u capacity_orig=%u idle_state=%d irqload=%llu online=%u, isolated=%u, reserved=%u, high_irq_load=%u nr_rtg_hp=%u",
		__entry->cpu, __entry->nr_running, __entry->cpu_util,
		__entry->cpu_util_cum, __entry->capacity_curr,
		__entry->capacity, __entry->capacity_orig,
		__entry->idle_state, __entry->irqload, __entry->online,
		__entry->isolated, __entry->reserved, __entry->high_irq_load)
		__entry->isolated, __entry->reserved, __entry->high_irq_load,
		__entry->nr_rtg_high_prio_tasks)
);

TRACE_EVENT(sched_compute_energy,
+35 −3
Original line number Diff line number Diff line
@@ -176,6 +176,7 @@ unsigned int sysctl_sched_min_task_util_for_boost = 51;
/* 0.68ms default for 20ms window size scaled to 1024 */
unsigned int sysctl_sched_min_task_util_for_colocation = 35;
__read_mostly unsigned int sysctl_sched_prefer_spread;
unsigned int sysctl_walt_rtg_cfs_boost_prio = 99; /* disabled by default */
#endif
unsigned int sched_small_task_threshold = 102;

@@ -4007,6 +4008,11 @@ static inline void adjust_cpus_for_packing(struct task_struct *p,
	if (prefer_spread_on_idle(*best_idle_cpu))
		fbt_env->need_idle |= 2;

	if (task_rtg_high_prio(p) && walt_nr_rtg_high_prio(*target_cpu) > 0) {
		*target_cpu = -1;
		return;
	}

	if (fbt_env->need_idle || task_placement_boost_enabled(p) || boosted ||
		shallowest_idle_cstate <= 0) {
		*target_cpu = -1;
@@ -4136,6 +4142,11 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
				se->vruntime = min_vruntime(vruntime,
							se->vruntime);
				return;
			} else if (task_rtg_high_prio(task_of(se))) {
				vruntime -= thresh;
				se->vruntime = min_vruntime(vruntime,
							se->vruntime);
				return;
			}
#endif
		}
@@ -6930,6 +6941,8 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
	int prev_cpu = task_cpu(p);
	bool next_group_higher_cap = false;
	int isolated_candidate = -1;
	unsigned int target_nr_rtg_high_prio = UINT_MAX;
	bool rtg_high_prio_task = task_rtg_high_prio(p);

	/*
	 * In most cases, target_capacity tracks capacity_orig of the most
@@ -7231,12 +7244,31 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
			 * capacity.
			 */

			/*
			 * Try to spread the rtg high prio tasks so that they
			 * don't preempt each other. This is an optimistic
			 * check assuming rtg high prio can actually preempt
			 * the current running task with the given vruntime
			 * boost.
			 */
			if (rtg_high_prio_task)  {
				if (walt_nr_rtg_high_prio(i) > target_nr_rtg_high_prio)
					continue;

				/* Favor CPUs with maximum spare capacity */
				if (walt_nr_rtg_high_prio(i) == target_nr_rtg_high_prio &&
						spare_cap < target_max_spare_cap)
					continue;

			} else {
				/* Favor CPUs with maximum spare capacity */
				if (spare_cap < target_max_spare_cap)
					continue;
			}

			target_max_spare_cap = spare_cap;
			target_capacity = capacity_orig;
			target_nr_rtg_high_prio = walt_nr_rtg_high_prio(i);
			target_cpu = i;
		}

+12 −0
Original line number Diff line number Diff line
@@ -105,6 +105,7 @@ struct walt_sched_stats {
	int nr_big_tasks;
	u64 cumulative_runnable_avg_scaled;
	u64 pred_demands_sum_scaled;
	unsigned int nr_rtg_high_prio_tasks;
};

struct group_cpu_time {
@@ -2896,6 +2897,12 @@ struct related_thread_group *task_related_thread_group(struct task_struct *p)
	return rcu_dereference(p->grp);
}

/*
 * task_rtg_high_prio - does @p qualify for RTG high-prio treatment?
 *
 * True only when @p belongs to a related thread group AND its priority
 * is at or above (numerically <=) the sysctl_walt_rtg_cfs_boost_prio
 * threshold. The default threshold of 99 effectively disables this for
 * CFS tasks (see sysctl definition).
 */
static inline bool task_rtg_high_prio(struct task_struct *p)
{
	if (!task_in_related_thread_group(p))
		return false;

	return p->prio <= sysctl_walt_rtg_cfs_boost_prio;
}

/* Is frequency of two cpus synchronized with each other? */
static inline int same_freq_domain(int src_cpu, int dst_cpu)
{
@@ -3130,6 +3137,11 @@ struct related_thread_group *task_related_thread_group(struct task_struct *p)
	return NULL;
}

/*
 * Stub for builds without WALT related-thread-group support
 * (presumably the !CONFIG_SCHED_WALT branch — confirm against the
 * surrounding #ifdef): no task can be an RTG high-prio task.
 */
static inline bool task_rtg_high_prio(struct task_struct *p)
{
	return false;
}

static inline u32 task_load(struct task_struct *p) { return 0; }
static inline u32 task_pl(struct task_struct *p) { return 0; }

Loading