Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 51f04119 authored by Pavankumar Kondeti's avatar Pavankumar Kondeti Committed by Joonwoo Park
Browse files

sched: EAS: add infrastructure for core_ctl



- Add a sched_get_cpu_util() API that returns the CPU utilization
(in percent) for the last window tracked by WALT. core_ctl can use
this API to query the load of all CPUs from the WALT window rollover
callback.

- Track the number of misfit tasks (also called big tasks). A task
is considered misfit when its utilization is more than the
capacity of the CPU that is hosting it. A misfit flag is added
per task so that we don't have to deal with updating
rq->hmp_stats.nr_big_tasks when task boost is changed or the cpufreq
policy is updated. When a task is running on a max-capacity CPU, it
is not counted as a misfit task. Any task running on the max-capacity
CPU is considered a BIG task by the sched_get_nr_running_avg() API.

Change-Id: Ia15962f346e52bd3cfa08bb6f119082840ca893d
Signed-off-by: default avatarPavankumar Kondeti <pkondeti@codeaurora.org>
parent 0587a6af
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -179,6 +179,7 @@ extern u64 nr_running_integral(unsigned int cpu);

extern void sched_update_nr_prod(int cpu, long delta, bool inc);
extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg);
extern unsigned int sched_get_cpu_util(int cpu);

extern void calc_global_load(unsigned long ticks);

@@ -1675,6 +1676,7 @@ struct task_struct {
	struct related_thread_group *grp;
	struct list_head grp_list;
	u64 cpu_cycles;
	bool misfit;
#endif

#ifdef CONFIG_CGROUP_SCHED
+2 −2
Original line number Diff line number Diff line
@@ -600,7 +600,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
			cfs_rq->throttle_count);
	SEQ_printf(m, "  .%-30s: %d\n", "runtime_enabled",
			cfs_rq->runtime_enabled);
#ifdef CONFIG_SCHED_HMP
#ifdef CONFIG_SCHED_WALT
	SEQ_printf(m, "  .%-30s: %d\n", "nr_big_tasks",
			cfs_rq->hmp_stats.nr_big_tasks);
	SEQ_printf(m, "  .%-30s: %llu\n", "cumulative_runnable_avg",
@@ -709,7 +709,7 @@ do { \
	P(cluster->cur_freq);
	P(cluster->max_freq);
	P(cluster->exec_scale_factor);
#ifdef CONFIG_SCHED_HMP
#ifdef CONFIG_SCHED_WALT
	P(hmp_stats.nr_big_tasks);
#endif
	SEQ_printf(m, "  .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg",
+18 −1
Original line number Diff line number Diff line
@@ -125,6 +125,10 @@ static inline bool update_sd_pick_busiest_active_balance(struct lb_env *env,

#endif /* CONFIG_SCHED_HMP */

#ifdef CONFIG_SCHED_WALT
static inline bool task_fits_max(struct task_struct *p, int cpu);
#endif

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
@@ -4758,6 +4762,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)

	if (!se) {
		add_nr_running(rq, 1);
#ifdef CONFIG_SCHED_WALT
		p->misfit = !task_fits_max(p, rq->cpu);
#endif
		inc_rq_hmp_stats(rq, p, 1);
	}

@@ -10572,6 +10579,10 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;
#ifdef CONFIG_SMP
	bool old_misfit = curr->misfit;
	bool misfit;
#endif

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
@@ -10587,7 +10598,13 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
		trace_sched_overutilized(true);
	}

	rq->misfit_task = !task_fits_max(curr, rq->cpu);
	misfit = !task_fits_max(curr, rq->cpu);
	rq->misfit_task = misfit;

	if (old_misfit != misfit) {
		adjust_nr_big_tasks(&rq->hmp_stats, 1, misfit);
		curr->misfit = misfit;
	}
#endif

}
+0 −18
Original line number Diff line number Diff line
@@ -807,24 +807,6 @@ unsigned int pct_task_load(struct task_struct *p)
	return load;
}

/*
 * Return total number of tasks "eligible" to run on highest capacity cpu
 *
 * This is simply nr_big_tasks for cpus which are not of max_capacity and
 * nr_running for cpus of max_capacity
 */
unsigned int nr_eligible_big_tasks(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	/* On a max-capacity CPU every runnable task counts as "big". */
	if (cpu_max_possible_capacity(cpu) == max_possible_capacity)
		return rq->nr_running;

	return rq->hmp_stats.nr_big_tasks;
}

static int __init set_sched_ravg_window(char *str)
{
	unsigned int window_size;
+0 −11
Original line number Diff line number Diff line
@@ -2898,16 +2898,5 @@ static inline void update_avg_burst(struct task_struct *p) { }
/* No-op stub: switch-out timestamping is compiled out in this
 * configuration (see the #endif CONFIG_SCHED_HMP closing this section).
 */
static inline void set_task_last_switch_out(struct task_struct *p,
					    u64 wallclock) { }

/* Without HMP big-task accounting there are never any "big" tasks. */
static inline unsigned int nr_eligible_big_tasks(int cpu) { return 0; }

/* No-op stub: big-task accounting only exists with CONFIG_SCHED_HMP. */
static inline void
inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p) { }

/* No-op stub: big-task accounting only exists with CONFIG_SCHED_HMP. */
static inline void
dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p) { }

static inline void clear_boost_kick(int cpu) { }
#endif /* CONFIG_SCHED_HMP */
Loading