Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0a3dced5 authored by Pavankumar Kondeti
Browse files

sched: walt: Add an API for querying default related thread group load



The default related thread group which is tied to schedtune is
always active. Add an API to return this group load in percentage
scaled to the minimum capacity CPU in the system.

Change-Id: Iea20137c7c2963583da249cbf97d7098d1659b66
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent e94c88f3
Loading
Loading
Loading
Loading
+43 −0
Original line number Diff line number Diff line
@@ -3274,6 +3274,49 @@ void walt_rotation_checkpoint(int nr_big)
	walt_rotation_enabled = nr_big >= num_possible_cpus();
}

/*
 * walt_get_default_coloc_group_load - aggregate WALT demand of the default
 * colocation related thread group, returned as a percentage scaled to the
 * minimum-capacity CPU in the system.
 *
 * Sums the per-task coloc_demand of every recently-active task in the
 * DEFAULT_CGROUP_COLOC_ID group and normalizes the total against
 * sched_ravg_window and the lowest CPU capacity.
 *
 * Returns 0 when the group has no tasks.
 */
unsigned int walt_get_default_coloc_group_load(void)
{
	struct related_thread_group *grp;
	unsigned long flags;
	u64 total_demand = 0, wallclock;
	struct task_struct *p;
	int min_cap_cpu, scale = 1024;	/* fallback scale if no min-cap CPU is known */

	/*
	 * NOTE(review): no NULL check on the lookup result -- the default
	 * colocation group is presumed to always exist (per the commit
	 * message, it is tied to schedtune and always active). Confirm
	 * lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID) cannot
	 * return NULL on this path.
	 */
	grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);

	/* Lock the group while walking its task list; irqs disabled. */
	raw_spin_lock_irqsave(&grp->lock, flags);
	if (list_empty(&grp->tasks)) {
		raw_spin_unlock_irqrestore(&grp->lock, flags);
		return 0;
	}

	wallclock = sched_ktime_clock();

	list_for_each_entry(p, &grp->tasks, grp_list) {
		/*
		 * Skip tasks whose last window mark is older than the full
		 * WALT history span; their demand sample is stale and would
		 * inflate the reported group load.
		 */
		if (p->ravg.mark_start < wallclock -
		    (sched_ravg_window * sched_ravg_hist_size))
			continue;

		total_demand += p->ravg.coloc_demand;
	}

	raw_spin_unlock_irqrestore(&grp->lock, flags);

	/*
	 * Scale the total demand to the lowest capacity CPU and
	 * convert into percentage.
	 *
	 * P = total_demand/sched_ravg_window * 1024/scale * 100
	 */

	min_cap_cpu = this_rq()->rd->min_cap_orig_cpu;
	if (min_cap_cpu != -1)
		scale = arch_scale_cpu_capacity(NULL, min_cap_cpu);

	/* 64-bit division helper: safe on 32-bit architectures. */
	return div64_u64(total_demand * 1024 * 100,
			(u64)sched_ravg_window * scale);
}

int walt_proc_update_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
+5 −0
Original line number Diff line number Diff line
@@ -304,6 +304,7 @@ static inline void walt_update_last_enqueue(struct task_struct *p)
extern void walt_rotate_work_init(void);
extern void walt_rotation_checkpoint(int nr_big);
extern unsigned int walt_rotation_enabled;
extern unsigned int walt_get_default_coloc_group_load(void);

#else /* CONFIG_SCHED_WALT */

@@ -312,6 +313,10 @@ static inline void walt_sched_init_rq(struct rq *rq) { }
/* No-op stubs used when CONFIG_SCHED_WALT is disabled. */
static inline void walt_rotate_work_init(void) { }
static inline void walt_rotation_checkpoint(int nr_big) { }
static inline void walt_update_last_enqueue(struct task_struct *p) { }
/* Without WALT there is no colocation group, so report zero load. */
static inline unsigned int walt_get_default_coloc_group_load(void)
{
	return 0;
}

/* No-op when CONFIG_SCHED_WALT is disabled: no window stats to update. */
static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
				int event, u64 wallclock, u64 irqtime) { }