Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f6e1b918 authored by Amir Vajid
Browse files

sched/walt: Improve the scheduler



This change is for general scheduler improvement.

Change-Id: I737751f065df6a5ed3093e3bda5e48750a14e4c9
Signed-off-by: Amir Vajid <avajid@codeaurora.org>
parent 6bdecf57
Loading
Loading
Loading
Loading
+8 −5
Original line number Diff line number Diff line
@@ -25,7 +25,6 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
#ifdef CONFIG_SMP
extern void sched_update_nr_prod(int cpu, long delta, bool inc);
extern unsigned int sched_get_cpu_util(int cpu);
extern u64 sched_get_cpu_last_busy_time(int cpu);
#else
static inline void sched_update_nr_prod(int cpu, long delta, bool inc)
{
@@ -34,16 +33,20 @@ static inline unsigned int sched_get_cpu_util(int cpu)
{
	return 0;
}
static inline u64 sched_get_cpu_last_busy_time(int cpu)
{
	return 0;
}
#endif

#ifdef CONFIG_SCHED_WALT
extern void sched_update_hyst_times(void);
extern u64 sched_lpm_disallowed_time(int cpu);
#else
static inline void sched_update_hyst_times(void)
{
}
static inline u64 sched_lpm_disallowed_time(int cpu)
{
	return 0;
}
#endif

static inline int sched_info_on(void)
{
+4 −1
Original line number Diff line number Diff line
@@ -46,6 +46,10 @@ extern unsigned int sysctl_sched_min_task_util_for_colocation;
extern unsigned int sysctl_sched_asym_cap_sibling_freq_match_pct;
extern unsigned int sysctl_sched_coloc_downmigrate_ns;
extern unsigned int sysctl_sched_task_unfilter_nr_windows;
extern unsigned int sysctl_sched_busy_hyst_enable_cpus;
extern unsigned int sysctl_sched_busy_hyst;
extern unsigned int sysctl_sched_coloc_busy_hyst_enable_cpus;
extern unsigned int sysctl_sched_coloc_busy_hyst;

extern int
walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
@@ -132,6 +136,5 @@ extern int sysctl_schedstats(struct ctl_table *table, int write,
extern char sched_lib_name[LIB_PATH_LENGTH];
extern unsigned int sched_lib_mask_force;
extern bool is_sched_lib_based_app(pid_t pid);
extern unsigned int sysctl_sched_busy_hysteresis_enable_cpus;

#endif /* _LINUX_SCHED_SYSCTL_H */
+49 −8
Original line number Diff line number Diff line
@@ -25,8 +25,14 @@ static DEFINE_PER_CPU(u64, nr_max);
static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
static s64 last_get_time;

unsigned int sysctl_sched_busy_hysteresis_enable_cpus;
static DEFINE_PER_CPU(atomic64_t, last_busy_time) = ATOMIC64_INIT(0);
#ifdef CONFIG_SCHED_WALT
unsigned int sysctl_sched_busy_hyst_enable_cpus;
unsigned int sysctl_sched_busy_hyst;
unsigned int sysctl_sched_coloc_busy_hyst_enable_cpus = 112;
unsigned int sysctl_sched_coloc_busy_hyst = 39000000;
static DEFINE_PER_CPU(atomic64_t, busy_hyst_end_time) = ATOMIC64_INIT(0);
static DEFINE_PER_CPU(u64, hyst_time);
#endif

#define NR_THRESHOLD_PCT		15

@@ -96,14 +102,34 @@ void sched_get_nr_running_avg(struct sched_avg_stats *stats)
}
EXPORT_SYMBOL(sched_get_nr_running_avg);

#ifdef CONFIG_SCHED_WALT
/*
 * sched_update_hyst_times - recompute the per-CPU busy-hysteresis window.
 *
 * For every possible CPU, per_cpu(hyst_time) is set to the larger of:
 *  - the standard hysteresis (sysctl_sched_busy_hyst), applied only to CPUs
 *    whose bit is set in sysctl_sched_busy_hyst_enable_cpus;
 *  - the colocation hysteresis (sysctl_sched_coloc_busy_hyst), applied only
 *    to CPUs whose bit is set in sysctl_sched_coloc_busy_hyst_enable_cpus
 *    AND while a related-thread-group big task is active without
 *    CONSERVATIVE_BOOST being in effect.
 *
 * The resulting per_cpu(hyst_time) is consumed by
 * update_busy_hyst_end_time() to extend busy_hyst_end_time on dequeue.
 */
void sched_update_hyst_times(void)
{
	u64 std_time, rtgb_time;
	bool rtgb_active;
	int cpu;

	/* Colocation hysteresis only counts when RTG-boost is active and we
	 * are not in conservative boost. */
	rtgb_active = is_rtgb_active() && sched_boost() != CONSERVATIVE_BOOST;

	for_each_possible_cpu(cpu) {
		std_time = (BIT(cpu)
			     & sysctl_sched_busy_hyst_enable_cpus) ?
			     sysctl_sched_busy_hyst : 0;
		rtgb_time = ((BIT(cpu)
			     & sysctl_sched_coloc_busy_hyst_enable_cpus)
			     && rtgb_active) ? sysctl_sched_coloc_busy_hyst : 0;
		/* A CPU honors whichever hysteresis source is longer. */
		per_cpu(hyst_time, cpu) = max(std_time, rtgb_time);
	}
}

#define BUSY_NR_RUN		3
#define BUSY_LOAD_FACTOR	10
static inline void update_last_busy_time(int cpu, bool dequeue,
static inline void update_busy_hyst_end_time(int cpu, bool dequeue,
				unsigned long prev_nr_run, u64 curr_time)
{
	bool nr_run_trigger = false, load_trigger = false;

	if (!(BIT(cpu) & sysctl_sched_busy_hysteresis_enable_cpus))
	if (!per_cpu(hyst_time, cpu))
		return;

	if (prev_nr_run >= BUSY_NR_RUN && per_cpu(nr, cpu) < BUSY_NR_RUN)
@@ -114,8 +140,15 @@ static inline void update_last_busy_time(int cpu, bool dequeue,
		load_trigger = true;

	if (nr_run_trigger || load_trigger)
		atomic64_set(&per_cpu(last_busy_time, cpu), curr_time);
		atomic64_set(&per_cpu(busy_hyst_end_time, cpu),
				curr_time + per_cpu(hyst_time, cpu));
}
#else
/* No-op stub used when CONFIG_SCHED_WALT is disabled (see #else above):
 * without WALT there is no busy-hysteresis tracking to update. */
static inline void update_busy_hyst_end_time(int cpu, bool dequeue,
				unsigned long prev_nr_run, u64 curr_time)
{
}
#endif

/**
 * sched_update_nr_prod
@@ -145,7 +178,7 @@ void sched_update_nr_prod(int cpu, long delta, bool inc)
	if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu))
		per_cpu(nr_max, cpu) = per_cpu(nr, cpu);

	update_last_busy_time(cpu, !inc, nr_running, curr_time);
	update_busy_hyst_end_time(cpu, !inc, nr_running, curr_time);

	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
	per_cpu(nr_big_prod_sum, cpu) += walt_big_tasks(cpu) * diff;
@@ -180,7 +213,15 @@ unsigned int sched_get_cpu_util(int cpu)
	return busy;
}

u64 sched_get_cpu_last_busy_time(int cpu)
#ifdef CONFIG_SCHED_WALT
u64 sched_lpm_disallowed_time(int cpu)
{
	return atomic64_read(&per_cpu(last_busy_time, cpu));
	u64 now = sched_clock();
	u64 bias_end_time = atomic64_read(&per_cpu(busy_hyst_end_time, cpu));

	if (now < bias_end_time)
		return bias_end_time - now;

	return 0;
}
#endif
+12 −4
Original line number Diff line number Diff line
@@ -2678,13 +2678,16 @@ static void _set_preferred_cluster(struct related_thread_group *grp)
	u64 combined_demand = 0;
	bool group_boost = false;
	u64 wallclock;
	bool prev_skip_min = grp->skip_min;

	if (list_empty(&grp->tasks))
		return;
	if (list_empty(&grp->tasks)) {
		grp->skip_min = false;
		goto out;
	}

	if (!hmp_capable()) {
		grp->skip_min = false;
		return;
		goto out;
	}

	wallclock = sched_ktime_clock();
@@ -2718,6 +2721,11 @@ static void _set_preferred_cluster(struct related_thread_group *grp)
	grp->last_update = wallclock;
	update_best_cluster(grp, combined_demand, group_boost);
	trace_sched_set_preferred_cluster(grp, combined_demand);
out:
	if (grp->id == DEFAULT_CGROUP_COLOC_ID
	    && grp->skip_min != prev_skip_min)
		sched_update_hyst_times();

}

void set_preferred_cluster(struct related_thread_group *grp)
@@ -3188,7 +3196,7 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
	BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
}

static bool is_rtgb_active(void)
bool is_rtgb_active(void)
{
	struct related_thread_group *grp;

+2 −0
Original line number Diff line number Diff line
@@ -323,6 +323,8 @@ static inline bool walt_should_kick_upmigrate(struct task_struct *p, int cpu)
	return false;
}

extern bool is_rtgb_active(void);

#else /* CONFIG_SCHED_WALT */

static inline void walt_sched_init_rq(struct rq *rq) { }
Loading