
Commit beb0ab58 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server

Merge "sched/walt: improve the scheduler"

parents 8a084f57 ff3a4812
+7 −0
@@ -41,6 +41,7 @@ extern unsigned int sysctl_sched_group_upmigrate_pct;
 extern unsigned int sysctl_sched_group_downmigrate_pct;
 extern unsigned int sysctl_sched_walt_rotate_big_tasks;
 extern unsigned int sysctl_sched_min_task_util_for_boost_colocation;
+extern unsigned int sysctl_sched_little_cluster_coloc_fmin_khz;

 extern int
 walt_proc_update_handler(struct ctl_table *table, int write,
@@ -122,4 +123,10 @@ extern int sysctl_schedstats(struct ctl_table *table, int write,
 				 void __user *buffer, size_t *lenp,
 				 loff_t *ppos);

+#ifdef CONFIG_SCHED_WALT
+extern int sched_little_cluster_coloc_fmin_khz_handler(struct ctl_table *table,
+					int write, void __user *buffer,
+					size_t *lenp, loff_t *ppos);
+#endif
+
 #endif /* _LINUX_SCHED_SYSCTL_H */
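Note: the merged change introduces a little-cluster colocation floor. A new tunable, sysctl_sched_little_cluster_coloc_fmin_khz (defaulting to 1 GHz, see the walt.c hunk further down), is translated into an equivalent window load that the governor reporting path uses as a minimum for the little cluster whenever the colocated group prefers a bigger cluster. The header hunk above only declares the tunable and its proc handler; the ctl_table entry that actually exposes it is not among the files shown in this view. A rough sketch of what such a registration could look like, following the usual sched_* sysctl pattern, is given below; the table name, placement and mode are assumptions, not taken from the commit, and bounds could be added via .extra1/.extra2 since the handler uses proc_dointvec_minmax().

#include <linux/sysctl.h>

/* Hypothetical registration sketch -- not part of this commit's diff. */
#ifdef CONFIG_SCHED_WALT
static struct ctl_table sched_coloc_fmin_sketch[] = {
	{
		.procname	= "sched_little_cluster_coloc_fmin_khz",
		.data		= &sysctl_sched_little_cluster_coloc_fmin_khz,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sched_little_cluster_coloc_fmin_khz_handler,
	},
	{ }
};
#endif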
+18 −4
@@ -493,8 +493,14 @@ DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,

 TRACE_EVENT(sched_load_to_gov,

-	TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load, u64 freq_aggr_thresh, u64 load, int policy, int big_task_rotation),
-	TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr_thresh, load, policy, big_task_rotation),
+	TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load,
+		u64 freq_aggr_thresh, u64 load, int policy,
+		int big_task_rotation,
+		unsigned int sysctl_sched_little_cluster_coloc_fmin_khz,
+		u64 coloc_boost_load),
+	TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr_thresh, load, policy,
+		big_task_rotation, sysctl_sched_little_cluster_coloc_fmin_khz,
+		coloc_boost_load),

 	TP_STRUCT__entry(
 		__field(	int,	cpu			)
@@ -510,6 +516,9 @@ TRACE_EVENT(sched_load_to_gov,
 		__field(	u64,	pl			)
 		__field(	u64,    load			)
 		__field(	int,    big_task_rotation	)
+		__field(unsigned int,
+				sysctl_sched_little_cluster_coloc_fmin_khz)
+		__field(u64,	coloc_boost_load)
 	),

 	TP_fast_assign(
@@ -526,13 +535,18 @@ TRACE_EVENT(sched_load_to_gov,
 		__entry->pl		= rq->walt_stats.pred_demands_sum;
 		__entry->load		= load;
 		__entry->big_task_rotation = big_task_rotation;
+		__entry->sysctl_sched_little_cluster_coloc_fmin_khz =
+				sysctl_sched_little_cluster_coloc_fmin_khz;
+		__entry->coloc_boost_load = coloc_boost_load;
 	),

-	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr_thresh=%llu tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d",
+	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr_thresh=%llu tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d sysctl_sched_little_cluster_coloc_fmin_khz=%u coloc_boost_load=%llu",
 		__entry->cpu, __entry->policy, __entry->ed_task_pid,
 		__entry->aggr_grp_load, __entry->freq_aggr_thresh,
 		__entry->tt_load, __entry->rq_ps, __entry->grp_rq_ps,
 		__entry->nt_ps, __entry->grp_nt_ps, __entry->pl, __entry->load,
-		__entry->big_task_rotation)
+		__entry->big_task_rotation,
+		__entry->sysctl_sched_little_cluster_coloc_fmin_khz,
+		__entry->coloc_boost_load)
 );
 #endif
+2 −0
@@ -274,6 +274,8 @@ static int sched_energy_probe(struct platform_device *pdev)

 	kfree(max_frequencies);

+	walt_map_freq_to_load();
+
 	dev_info(&pdev->dev, "Sched-energy-costs capacity updated\n");
 	return 0;

+9 −0
@@ -108,6 +108,7 @@ struct sched_cluster {
 	int notifier_sent;
 	bool wake_up_idle;
 	u64 aggr_grp_load;
+	u64 coloc_boost_load;
 };

 extern unsigned int sched_disable_window_stats;
@@ -2928,6 +2929,13 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
 	return policy;
 }

+extern void walt_map_freq_to_load(void);
+
+static inline bool is_min_capacity_cluster(struct sched_cluster *cluster)
+{
+	return is_min_capacity_cpu(cluster_first_cpu(cluster));
+}
+
 #else	/* CONFIG_SCHED_WALT */

 struct walt_sched_stats;
@@ -3075,6 +3083,7 @@ static inline unsigned int power_cost(int cpu, u64 demand)
 #endif

 static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
+static inline void walt_map_freq_to_load(void) { }
 #endif	/* CONFIG_SCHED_WALT */

 static inline bool energy_aware(void)
+76 −1
@@ -465,6 +465,7 @@ u64 freq_policy_load(struct rq *rq)
 	struct sched_cluster *cluster = rq->cluster;
 	u64 aggr_grp_load = cluster->aggr_grp_load;
 	u64 load, tt_load = 0;
+	u64 coloc_boost_load = cluster->coloc_boost_load;

 	if (rq->ed_task != NULL) {
 		load = sched_ravg_window;
@@ -476,6 +477,9 @@ u64 freq_policy_load(struct rq *rq)
 	else
 		load = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;

+	if (coloc_boost_load)
+		load = max_t(u64, load, coloc_boost_load);
+
 	tt_load = top_task_load(rq);
 	switch (reporting_policy) {
 	case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
@@ -492,7 +496,9 @@ u64 freq_policy_load(struct rq *rq)

 done:
 	trace_sched_load_to_gov(rq, aggr_grp_load, tt_load, freq_aggr_thresh,
-				load, reporting_policy, walt_rotation_enabled);
+				load, reporting_policy, walt_rotation_enabled,
+				sysctl_sched_little_cluster_coloc_fmin_khz,
+				coloc_boost_load);
 	return load;
 }

@@ -2280,6 +2286,7 @@ struct sched_cluster init_cluster = {
 	.notifier_sent		=	0,
 	.wake_up_idle		=	0,
 	.aggr_grp_load		=	0,
+	.coloc_boost_load	=	0,
 };

 void init_clusters(void)
@@ -3063,6 +3070,71 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
 	BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
 }

+/* Set to 1GHz by default */
+unsigned int sysctl_sched_little_cluster_coloc_fmin_khz = 1000000;
+static u64 coloc_boost_load;
+
+void walt_map_freq_to_load(void)
+{
+	struct sched_cluster *cluster;
+
+	for_each_sched_cluster(cluster) {
+		if (is_min_capacity_cluster(cluster)) {
+			int fcpu = cluster_first_cpu(cluster);
+
+			coloc_boost_load = div64_u64(
+				((u64)sched_ravg_window *
+				arch_scale_cpu_capacity(NULL, fcpu) *
+				sysctl_sched_little_cluster_coloc_fmin_khz),
+				(u64)1024 * cpu_max_possible_freq(fcpu));
+			coloc_boost_load = div64_u64(coloc_boost_load << 2, 5);
+			break;
+		}
+	}
+}
+
+static void walt_update_coloc_boost_load(void)
+{
+	struct related_thread_group *grp;
+	struct sched_cluster *cluster;
+
+	if (!sysctl_sched_little_cluster_coloc_fmin_khz ||
+			sysctl_sched_boost == CONSERVATIVE_BOOST)
+		return;
+
+	grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
+	if (!grp || !grp->preferred_cluster ||
+			is_min_capacity_cluster(grp->preferred_cluster))
+		return;
+
+	for_each_sched_cluster(cluster) {
+		if (is_min_capacity_cluster(cluster)) {
+			cluster->coloc_boost_load = coloc_boost_load;
+			break;
+		}
+	}
+}
+
+int sched_little_cluster_coloc_fmin_khz_handler(struct ctl_table *table,
+				int write, void __user *buffer, size_t *lenp,
+				loff_t *ppos)
+{
+	int ret;
+	static DEFINE_MUTEX(mutex);
+
+	mutex_lock(&mutex);
+
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret || !write)
+		goto done;
+
+	walt_map_freq_to_load();
+
+done:
+	mutex_unlock(&mutex);
+	return ret;
+}
+
 /*
  * Runs in hard-irq context. This should ideally run just after the latest
  * window roll-over.
@@ -3100,10 +3172,13 @@ void walt_irq_work(struct irq_work *irq_work)
 		}

 		cluster->aggr_grp_load = aggr_grp_load;
+		cluster->coloc_boost_load = 0;

 		raw_spin_unlock(&cluster->load_lock);
 	}

+	walt_update_coloc_boost_load();
+
 	for_each_sched_cluster(cluster) {
 		cpumask_t cluster_online_cpus;
 		unsigned int num_cpus, i = 1;
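Note on the arithmetic: walt_map_freq_to_load() above caches (at energy-probe time and whenever the sysctl is rewritten) the window load that corresponds to running the little CPUs at the floor frequency, i.e. sched_ravg_window * capacity * fmin_khz / (1024 * fmax_khz), then trimmed to 4/5 of that (the shift-by-2 followed by divide-by-5). walt_irq_work() clears cluster->coloc_boost_load every window and walt_update_coloc_boost_load() re-applies it only when the DEFAULT_CGROUP_COLOC_ID group prefers a non-min-capacity cluster (and the tunable is non-zero, outside conservative boost), so freq_policy_load() floors the reported load at this value exactly in that case. The stand-alone userspace sketch below just replays the two steps; the window length, little-CPU capacity and fmax used are illustrative assumptions, not values from the commit.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* All of these are illustrative assumptions, not values from the patch. */
	uint64_t sched_ravg_window = 20000000ULL; /* 20 ms WALT window, in ns */
	uint64_t little_capacity   = 400ULL;      /* arch_scale_cpu_capacity() of a little CPU */
	uint64_t fmin_khz          = 1000000ULL;  /* the sysctl's 1 GHz default */
	uint64_t fmax_khz          = 1800000ULL;  /* cpu_max_possible_freq() of that CPU */

	/* Same two steps as walt_map_freq_to_load(). */
	uint64_t load = (sched_ravg_window * little_capacity * fmin_khz) /
			(1024ULL * fmax_khz);
	load = (load << 2) / 5;	/* keep 4/5 of the mapped load */

	printf("coloc_boost_load ~ %llu ns of a %llu ns window (%.1f%%)\n",
	       (unsigned long long)load,
	       (unsigned long long)sched_ravg_window,
	       100.0 * (double)load / (double)sched_ravg_window);
	return 0;
}

With these assumed numbers the floor comes out to roughly 3.5 ms of a 20 ms window, so while the boost is active the governor would never see the little cluster report less than about 17% equivalent load.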