Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 25de011c authored by Syed Rameez Mustafa's avatar Syed Rameez Mustafa
Browse files

sched: improve the scheduler



Move early-detection task handling (clear_ed_task()/early_detection_notify()) out of the HMP-only code and under CONFIG_SCHED_WALT, clear rq->ed_task when a task dequeues to sleep, report the full window load from freq_policy_load() while an early-detection task is pending, and account buffered load subtractions from walt_irq_work().

CRs-Fixed: 2040904
Change-Id: Ibd091e86c0a60ae8db080c6d7dfae7acdfb1812d
Signed-off-by: default avatarSyed Rameez Mustafa <rameezmustafa@codeaurora.org>
parent 20acfe73
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -801,6 +801,9 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	if (flags & DEQUEUE_SLEEP)
		clear_ed_task(p, rq);

	dequeue_task(rq, p, flags);
}

@@ -8425,6 +8428,7 @@ void __init sched_init(void)
		rq->old_estimated_time = 0;
		rq->old_busy_time_group = 0;
		rq->hmp_stats.pred_demands_sum = 0;
		rq->ed_task = NULL;
		rq->curr_table = 0;
		rq->prev_top = 0;
		rq->curr_top = 0;
+0 −57
Original line number Diff line number Diff line
@@ -1236,39 +1236,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
		sched_ktime_clock() - start_ts, reason, old, new);
}

/*
 * In this function we match the accumulated subtractions with the current
 * and previous windows we are operating with. Ignore any entries where
 * the window start in the load_subtraction struct does not match either
 * the curent or the previous window. This could happen whenever CPUs
 * become idle or busy with interrupts disabled for an extended period.
 */
static inline void account_load_subtractions(struct rq *rq)
{
	u64 ws = rq->window_start;
	u64 prev_ws = ws - sched_ravg_window;
	struct load_subtractions *ls = rq->load_subs;
	int i;

	for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
		if (ls[i].window_start == ws) {
			rq->curr_runnable_sum -= ls[i].subs;
			rq->nt_curr_runnable_sum -= ls[i].new_subs;
		} else if (ls[i].window_start == prev_ws) {
			rq->prev_runnable_sum -= ls[i].subs;
			rq->nt_prev_runnable_sum -= ls[i].new_subs;
		}

		ls[i].subs = 0;
		ls[i].new_subs = 0;
	}

	BUG_ON((s64)rq->prev_runnable_sum < 0);
	BUG_ON((s64)rq->curr_runnable_sum < 0);
	BUG_ON((s64)rq->nt_prev_runnable_sum < 0);
	BUG_ON((s64)rq->nt_curr_runnable_sum < 0);
}

void sched_get_cpus_busy(struct sched_load *busy,
			 const struct cpumask *query_cpus)
{
@@ -1634,30 +1601,6 @@ static int register_sched_callback(void)
 */
core_initcall(register_sched_callback);

/*
 * Look through the first few cfs tasks on @rq for one that has been
 * runnable for at least EARLY_DETECTION_DURATION since it last woke up.
 * If one is found it is remembered in rq->ed_task and true is returned;
 * otherwise rq->ed_task is left cleared and false is returned.  Only
 * applies while a boost policy is active and the rq has cfs tasks.
 */
bool early_detection_notify(struct rq *rq, u64 wallclock)
{
	struct task_struct *task;
	int remaining = 10;	/* bound the scan to the first 10 tasks */

	if (sched_boost_policy() == SCHED_BOOST_NONE || !rq->cfs.h_nr_running)
		return false;

	rq->ed_task = NULL;
	list_for_each_entry(task, &rq->cfs_tasks, se.group_node) {
		u64 waited = wallclock - task->last_wake_ts;

		if (waited >= EARLY_DETECTION_DURATION) {
			rq->ed_task = task;
			return true;
		}

		if (!--remaining)
			break;
	}

	return false;
}

void update_avg_burst(struct task_struct *p)
{
	update_avg(&p->ravg.avg_burst, p->ravg.curr_burst);
+9 −9
Original line number Diff line number Diff line
@@ -2672,6 +2672,8 @@ extern int got_boost_kick(void);
extern void clear_boost_kick(int cpu);
extern enum sched_boost_policy sched_boost_policy(void);
extern void sched_boost_parse_dt(void);
extern void clear_ed_task(struct task_struct *p, struct rq *rq);
extern bool early_detection_notify(struct rq *rq, u64 wallclock);

#else	/* CONFIG_SCHED_WALT */

@@ -2818,6 +2820,13 @@ static inline enum sched_boost_policy sched_boost_policy(void)

static inline void sched_boost_parse_dt(void) { }

/* CONFIG_SCHED_WALT disabled: no early-detection task tracking, no-op. */
static inline void clear_ed_task(struct task_struct *p, struct rq *rq) { }

/* CONFIG_SCHED_WALT disabled: never report an early-detection task. */
static inline bool early_detection_notify(struct rq *rq, u64 wallclock)
{
	/* Return the bool literal rather than integer 0. */
	return false;
}

#endif	/* CONFIG_SCHED_WALT */

#ifdef CONFIG_SCHED_HMP
@@ -2830,10 +2839,8 @@ extern void notify_migration(int src_cpu, int dest_cpu,
extern void note_task_waking(struct task_struct *p, u64 wallclock);
extern void
check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
extern void clear_ed_task(struct task_struct *p, struct rq *rq);
extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
					struct task_struct *p, s64 delta);
extern bool early_detection_notify(struct rq *rq, u64 wallclock);
extern unsigned int power_cost(int cpu, u64 demand);
extern unsigned int cpu_temp(int cpu);
extern void pre_big_task_count_change(const struct cpumask *cpus);
@@ -2888,16 +2895,9 @@ static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
static inline void
check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }

/* CONFIG_SCHED_HMP disabled: no early-detection task tracking, no-op. */
static inline void clear_ed_task(struct task_struct *p, struct rq *rq) { }

static inline void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
				      struct task_struct *p, s64 delta) { }

/* CONFIG_SCHED_HMP disabled: never report an early-detection task. */
static inline bool early_detection_notify(struct rq *rq, u64 wallclock)
{
	/* Return the bool literal rather than integer 0. */
	return false;
}

static inline unsigned int power_cost(int cpu, u64 demand)
{
	return SCHED_CAPACITY_SCALE;
+84 −7
Original line number Diff line number Diff line
@@ -40,6 +40,8 @@ const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP",
#define SCHED_FREQ_ACCOUNT_WAIT_TIME 0
#define SCHED_ACCOUNT_WAIT_TIME 1

#define EARLY_DETECTION_DURATION 9500000

static ktime_t ktime_last;
static bool sched_ktime_suspended;
static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
@@ -341,6 +343,36 @@ static void update_task_cpu_cycles(struct task_struct *p, int cpu)
		p->cpu_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
}

/* Drop @p as this runqueue's early-detection task, if it currently is. */
void clear_ed_task(struct task_struct *p, struct rq *rq)
{
	if (rq->ed_task == p)
		rq->ed_task = NULL;
}

/*
 * Scan at most the first 10 cfs tasks on @rq and record in rq->ed_task
 * the first one that has been runnable for EARLY_DETECTION_DURATION or
 * longer since its last wakeup.  Returns true when such a task exists,
 * false otherwise.  Skipped entirely unless a boost policy is in effect
 * and the rq has runnable cfs tasks.
 */
bool early_detection_notify(struct rq *rq, u64 wallclock)
{
	struct task_struct *candidate;
	int budget = 10;

	if (sched_boost_policy() == SCHED_BOOST_NONE || !rq->cfs.h_nr_running)
		return false;

	rq->ed_task = NULL;
	list_for_each_entry(candidate, &rq->cfs_tasks, se.group_node) {
		if (budget-- == 0)
			break;

		if (wallclock - candidate->last_wake_ts >=
						EARLY_DETECTION_DURATION) {
			rq->ed_task = candidate;
			return true;
		}
	}

	return false;
}

void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
{
	struct rq *rq = cpu_rq(cpu);
@@ -466,6 +498,9 @@ u64 freq_policy_load(struct rq *rq)
	u64 aggr_grp_load = cluster->aggr_grp_load;
	u64 load;

	if (rq->ed_task != NULL)
		return sched_ravg_window;

	if (aggr_grp_load > sched_freq_aggregate_threshold)
		load = rq->prev_runnable_sum + aggr_grp_load;
	else
@@ -487,6 +522,39 @@ u64 freq_policy_load(struct rq *rq)
	return load;
}

/*
 * Apply the accumulated load subtractions to whichever of the tracked
 * windows they belong to.  An entry matching rq->window_start adjusts the
 * current-window sums, one matching the preceding window adjusts the
 * previous-window sums, and any other entry is ignored.  Mismatches occur
 * when a CPU has been idle, or busy with interrupts disabled, for long
 * enough that both tracked windows have moved on.
 */
static inline void account_load_subtractions(struct rq *rq)
{
	struct load_subtractions *subs = rq->load_subs;
	u64 window = rq->window_start;
	u64 prev_window = window - sched_ravg_window;
	int slot;

	for (slot = 0; slot < NUM_TRACKED_WINDOWS; slot++) {
		if (subs[slot].window_start == window) {
			rq->curr_runnable_sum -= subs[slot].subs;
			rq->nt_curr_runnable_sum -= subs[slot].new_subs;
		} else if (subs[slot].window_start == prev_window) {
			rq->prev_runnable_sum -= subs[slot].subs;
			rq->nt_prev_runnable_sum -= subs[slot].new_subs;
		}

		/* Clear the slot whether or not it was applied. */
		subs[slot].subs = 0;
		subs[slot].new_subs = 0;
	}

	/* Negative sums mean the accounting went wrong; crash immediately. */
	BUG_ON((s64)rq->prev_runnable_sum < 0);
	BUG_ON((s64)rq->curr_runnable_sum < 0);
	BUG_ON((s64)rq->nt_prev_runnable_sum < 0);
	BUG_ON((s64)rq->nt_curr_runnable_sum < 0);
}

static inline void create_subtraction_entry(struct rq *rq, u64 ws, int index)
{
	rq->load_subs[index].window_start = ws;
@@ -2927,6 +2995,7 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
 */
void walt_irq_work(struct irq_work *irq_work)
{
	struct sched_cluster *cluster;
	struct rq *rq;
	int cpu;
	u64 wc;
@@ -2936,15 +3005,23 @@ void walt_irq_work(struct irq_work *irq_work)

	wc = sched_ktime_clock();

	for_each_cpu(cpu, cpu_possible_mask) {
		if (cpu == smp_processor_id())
			continue;
	for_each_sched_cluster(cluster) {
		raw_spin_lock(&cluster->load_lock);

		for_each_cpu(cpu, &cluster->cpus) {
			rq = cpu_rq(cpu);
		if (rq->curr)
			update_task_ravg(rq->curr, rq, TASK_UPDATE, wc, 0);
			if (rq->curr) {
				update_task_ravg(rq->curr, rq,
						TASK_UPDATE, wc, 0);
				account_load_subtractions(rq);
			}

			cpufreq_update_util(rq, 0);
		}

		raw_spin_unlock(&cluster->load_lock);
	}

	for_each_cpu(cpu, cpu_possible_mask)
		raw_spin_unlock(&cpu_rq(cpu)->lock);