Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e2ac8116 authored by Sai Harshini Nimmala
Browse files

sched/walt: Consolidate WALT parameters



Create specific structures to hold all WALT related parameters used in the
scheduler.
This is to separate WALT functionality from scheduler code for GKI
purposes.

Change-Id: I6013883c00f2089dba3e5d0890c9abf5f57b3da6
Signed-off-by: Sai Harshini Nimmala <snimmala@codeaurora.org>
parent 3804c2a5
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -619,7 +619,7 @@ struct walt_task_struct {
	u32				unfilter;
	u64				last_wake_ts;
	u64				last_enqueued_ts;
	struct related_thread_group __rcu *grp;
	struct walt_related_thread_group __rcu	*grp;
	struct list_head		grp_list;
	u64				cpu_cycles;
	cpumask_t			cpus_requested;
+2 −2
Original line number Diff line number Diff line
@@ -475,11 +475,11 @@ TRACE_EVENT(sched_load_balance_skip_tasks,
	TP_fast_assign(
		__entry->scpu           = scpu;
		__entry->src_util_cum   =
					cpu_rq(scpu)->cum_window_demand_scaled;
				cpu_rq(scpu)->wrq.cum_window_demand_scaled;
		__entry->grp_type       = grp_type;
		__entry->dcpu           = dcpu;
		__entry->dst_util_cum   =
					cpu_rq(dcpu)->cum_window_demand_scaled;
				cpu_rq(dcpu)->wrq.cum_window_demand_scaled;
		__entry->pid            = pid;
		__entry->affinity       = affinity;
		__entry->task_util      = task_util;
+18 −18
Original line number Diff line number Diff line
@@ -1338,7 +1338,7 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
	uclamp_rq_dec(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
#ifdef CONFIG_SCHED_WALT
	if (p == rq->ed_task)
	if (p == rq->wrq.ed_task)
		early_detection_notify(rq, sched_ktime_clock());
#endif
	trace_sched_enq_deq_task(p, 0, cpumask_bits(&p->cpus_mask)[0]);
@@ -3697,7 +3697,7 @@ void scheduler_tick(void)
	u64 wallclock;
	bool early_notif;
	u32 old_load;
	struct related_thread_group *grp;
	struct walt_related_thread_group *grp;
	unsigned int flag = 0;

	sched_clock_tick();
@@ -7202,7 +7202,7 @@ static void walt_schedgp_attach(struct cgroup_taskset *tset)
	cgroup_taskset_first(tset, &css);
	tg = css_tg(css);

	colocate = tg->colocate;
	colocate = tg->wtg.colocate;

	cgroup_taskset_for_each(task, css, tset)
		sync_cgroup_colocation(task, colocate);
@@ -7214,7 +7214,7 @@ sched_boost_override_read(struct cgroup_subsys_state *css,
{
	struct task_group *tg = css_tg(css);

	return (u64) tg->sched_boost_no_override;
	return (u64) tg->wtg.sched_boost_no_override;
}

static int sched_boost_override_write(struct cgroup_subsys_state *css,
@@ -7222,7 +7222,7 @@ static int sched_boost_override_write(struct cgroup_subsys_state *css,
{
	struct task_group *tg = css_tg(css);

	tg->sched_boost_no_override = !!override;
	tg->wtg.sched_boost_no_override = !!override;
	return 0;
}

@@ -7231,7 +7231,7 @@ static u64 sched_colocate_read(struct cgroup_subsys_state *css,
{
	struct task_group *tg = css_tg(css);

	return (u64) tg->colocate;
	return (u64) tg->wtg.colocate;
}

static int sched_colocate_write(struct cgroup_subsys_state *css,
@@ -7239,11 +7239,11 @@ static int sched_colocate_write(struct cgroup_subsys_state *css,
{
	struct task_group *tg = css_tg(css);

	if (tg->colocate_update_disabled)
	if (tg->wtg.colocate_update_disabled)
		return -EPERM;

	tg->colocate = !!colocate;
	tg->colocate_update_disabled = true;
	tg->wtg.colocate = !!colocate;
	tg->wtg.colocate_update_disabled = true;
	return 0;
}
#else
@@ -8412,25 +8412,25 @@ void sched_account_irqtime(int cpu, struct task_struct *curr,
		walt_update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(),
								delta);

	nr_ticks = cur_jiffies_ts - rq->irqload_ts;
	nr_ticks = cur_jiffies_ts - rq->wrq.irqload_ts;

	if (nr_ticks) {
		if (nr_ticks < 10) {
			/* Decay CPU's irqload by 3/4 for each window. */
			rq->avg_irqload *= (3 * nr_ticks);
			rq->avg_irqload = div64_u64(rq->avg_irqload,
			rq->wrq.avg_irqload *= (3 * nr_ticks);
			rq->wrq.avg_irqload = div64_u64(rq->wrq.avg_irqload,
							4 * nr_ticks);
		} else {
			rq->avg_irqload = 0;
			rq->wrq.avg_irqload = 0;
		}
		rq->avg_irqload += rq->cur_irqload;
		rq->high_irqload = (rq->avg_irqload >=
		rq->wrq.avg_irqload += rq->wrq.cur_irqload;
		rq->wrq.high_irqload = (rq->wrq.avg_irqload >=
				    sysctl_sched_cpu_high_irqload);
		rq->cur_irqload = 0;
		rq->wrq.cur_irqload = 0;
	}

	rq->cur_irqload += delta;
	rq->irqload_ts = cur_jiffies_ts;
	rq->wrq.cur_irqload += delta;
	rq->wrq.irqload_ts = cur_jiffies_ts;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
#endif
+1 −1
Original line number Diff line number Diff line
@@ -65,7 +65,7 @@ struct sugov_cpu {
	unsigned int		iowait_boost;
	u64			last_update;

	struct sched_walt_cpu_load walt_load;
	struct walt_cpu_load	walt_load;

	unsigned long util;
	unsigned int flags;
+3 −3
Original line number Diff line number Diff line
@@ -650,10 +650,10 @@ do { \
	P(cpu_capacity);
#endif
#ifdef CONFIG_SCHED_WALT
	P(cluster->cur_freq);
	P(walt_stats.nr_big_tasks);
	P(wrq.cluster->cur_freq);
	P(wrq.walt_stats.nr_big_tasks);
	SEQ_printf(m, "  .%-30s: %llu\n", "walt_stats.cumulative_runnable_avg",
			rq->walt_stats.cumulative_runnable_avg_scaled);
			rq->wrq.walt_stats.cumulative_runnable_avg_scaled);
#endif
#undef P
#undef PN
Loading