Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d12365df authored by Amir Vajid
Browse files

sched/walt: Improve the scheduler



This change caps how long colocation busy hysteresis stays active: it tracks how long the default colocation group's boost (RTGB) has been active via a new start timestamp, and disables hysteresis once that time exceeds the new sysctl_sched_coloc_busy_hyst_max_ms limit.

Change-Id: I8ff4768d56d8e63b2cfa78e5f34cb156ee60e3da
Signed-off-by: Amir Vajid <avajid@codeaurora.org>
parent f6e1b918
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -50,6 +50,7 @@ extern unsigned int sysctl_sched_busy_hyst_enable_cpus;
extern unsigned int sysctl_sched_busy_hyst;
extern unsigned int sysctl_sched_coloc_busy_hyst_enable_cpus;
extern unsigned int sysctl_sched_coloc_busy_hyst;
extern unsigned int sysctl_sched_coloc_busy_hyst_max_ms;

extern int
walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
+1 −0
Original line number Diff line number Diff line
@@ -2659,6 +2659,7 @@ struct related_thread_group {
	struct rcu_head rcu;
	u64 last_update;
	u64 downmigrate_ts;
	u64 start_ts;
};

extern struct sched_cluster *sched_cluster[NR_CPUS];
+15 −2
Original line number Diff line number Diff line
@@ -30,11 +30,13 @@ unsigned int sysctl_sched_busy_hyst_enable_cpus;
unsigned int sysctl_sched_busy_hyst;
unsigned int sysctl_sched_coloc_busy_hyst_enable_cpus = 112;
unsigned int sysctl_sched_coloc_busy_hyst = 39000000;
unsigned int sysctl_sched_coloc_busy_hyst_max_ms = 5000;
static DEFINE_PER_CPU(atomic64_t, busy_hyst_end_time) = ATOMIC64_INIT(0);
static DEFINE_PER_CPU(u64, hyst_time);
#endif
static DEFINE_PER_CPU(u64, hyst_time);

#define NR_THRESHOLD_PCT		15
#define MAX_RTGB_TIME (sysctl_sched_coloc_busy_hyst_max_ms * NSEC_PER_MSEC)

/**
 * sched_get_nr_running_avg
@@ -51,6 +53,7 @@ void sched_get_nr_running_avg(struct sched_avg_stats *stats)
	u64 curr_time = sched_clock();
	u64 period = curr_time - last_get_time;
	u64 tmp_nr, tmp_misfit;
	bool any_hyst_time = false;

	if (!period)
		return;
@@ -97,6 +100,15 @@ void sched_get_nr_running_avg(struct sched_avg_stats *stats)
		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
	}

	for_each_possible_cpu(cpu) {
		if (per_cpu(hyst_time, cpu)) {
			any_hyst_time = true;
			break;
		}
	}
	if (any_hyst_time && get_rtgb_active_time() >= MAX_RTGB_TIME)
		sched_update_hyst_times();

	last_get_time = curr_time;

}
@@ -109,7 +121,8 @@ void sched_update_hyst_times(void)
	bool rtgb_active;
	int cpu;

	rtgb_active = is_rtgb_active() && sched_boost() != CONSERVATIVE_BOOST;
	rtgb_active = is_rtgb_active() && (sched_boost() != CONSERVATIVE_BOOST)
			&& (get_rtgb_active_time() < MAX_RTGB_TIME);

	for_each_possible_cpu(cpu) {
		std_time = (BIT(cpu)
+17 −2
Original line number Diff line number Diff line
@@ -2723,9 +2723,11 @@ static void _set_preferred_cluster(struct related_thread_group *grp)
	trace_sched_set_preferred_cluster(grp, combined_demand);
out:
	if (grp->id == DEFAULT_CGROUP_COLOC_ID
	    && grp->skip_min != prev_skip_min)
	    && grp->skip_min != prev_skip_min) {
		if (grp->skip_min)
			grp->start_ts = sched_clock();
		sched_update_hyst_times();

	}
}

void set_preferred_cluster(struct related_thread_group *grp)
@@ -3204,6 +3206,19 @@ bool is_rtgb_active(void)
	return grp && grp->skip_min;
}

/*
 * get_rtgb_active_time - how long the real-time group boost has been active.
 *
 * Returns the elapsed time (in sched_clock() ns) since the default
 * colocation group entered the skip_min (boosted) state, or 0 when the
 * group does not exist, is not boosted, or has no recorded start time.
 */
u64 get_rtgb_active_time(void)
{
	struct related_thread_group *grp =
		lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);

	if (!grp || !grp->skip_min || !grp->start_ts)
		return 0;

	return sched_clock() - grp->start_ts;
}

/*
 * Runs in hard-irq context. This should ideally run just after the latest
 * window roll-over.
+5 −0
Original line number Diff line number Diff line
@@ -324,6 +324,7 @@ static inline bool walt_should_kick_upmigrate(struct task_struct *p, int cpu)
}

extern bool is_rtgb_active(void);
extern u64 get_rtgb_active_time(void);

#else /* CONFIG_SCHED_WALT */

@@ -411,6 +412,10 @@ static inline bool walt_should_kick_upmigrate(struct task_struct *p, int cpu)
	return false;
}

/*
 * !CONFIG_SCHED_WALT stub: with WALT disabled there is no RTGB state to
 * track, so report zero active time.
 */
static inline u64 get_rtgb_active_time(void)
{
	return 0;
}
#endif /* CONFIG_SCHED_WALT */

#endif
Loading