Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 160e2a17 authored by Linux Build Service Account; committed by Gerrit — the friendly Code Review server
Browse files

Merge "sched: walt: move the cumulative window demand updates to enqueue"

parents 38d08092 0cebff04
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -2187,9 +2187,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
		notif_required = true;
	}

	if (!__task_in_cum_window_demand(cpu_rq(cpu), p))
		inc_cum_window_demand(cpu_rq(cpu), p, task_load(p));

	note_task_waking(p, wallclock);
#endif /* CONFIG_SMP */

@@ -2249,8 +2246,6 @@ static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie

		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
		if (!__task_in_cum_window_demand(rq, p))
			inc_cum_window_demand(rq, p, task_load(p));
		cpufreq_update_util(rq, 0);
		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
		note_task_waking(p, wallclock);
@@ -9577,7 +9572,12 @@ void sched_exit(struct task_struct *p)
	wallclock = sched_ktime_clock();
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	dequeue_task(rq, p, 0);
	dec_cum_window_demand(rq, p);
	/*
	 * task's contribution is already removed from the
	 * cumulative window demand in dequeue. As the
	 * task's stats are reset, the next enqueue does
	 * not change the cumulative window demand.
	 */
	reset_task_stats(p);
	p->ravg.mark_start = wallclock;
	p->ravg.sum_history[0] = EXITING_TASK_MARKER;
+5 −27
Original line number Diff line number Diff line
@@ -2619,32 +2619,20 @@ static inline void clear_reserved(int cpu)
	clear_bit(CPU_RESERVED, &rq->walt_flags);
}

static inline bool
__task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
{
	return (p->on_rq || p->last_sleep_ts >= rq->window_start);
}

/*
 * task_in_cum_window_demand - is @p's demand accounted in @rq's
 * cumulative window demand?
 *
 * True when @p lives on @rq's CPU and is either runnable or last went
 * to sleep within the current window.
 *
 * NOTE(review): the scraped diff left both the old helper-based return
 * and the new open-coded return in place, making the second statement
 * unreachable; only the post-merge open-coded form is kept here.
 */
static inline bool
task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
{
	return cpu_of(rq) == task_cpu(p) && (p->on_rq || p->last_sleep_ts >=
							 rq->window_start);
}

/*
 * walt_fixup_cum_window_demand - adjust @rq's cumulative window demand
 * by @delta.
 *
 * @delta may be negative (e.g. when a task's contribution is removed),
 * so clamp the aggregate at zero rather than letting it go negative.
 *
 * NOTE(review): the scraped diff interleaved the removed
 * dec_cum_window_demand() signature and its "-= p->ravg.demand" line
 * with this new helper; the reconstruction below is the post-merge
 * function from the commit.
 */
static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 delta)
{
	rq->cum_window_demand += delta;
	if (unlikely((s64)rq->cum_window_demand < 0))
		rq->cum_window_demand = 0;
}

/* Credit @delta worth of demand from @p to @rq's cumulative window demand. */
static inline void
inc_cum_window_demand(struct rq *rq, struct task_struct *p, s64 delta)
{
	rq->cum_window_demand = rq->cum_window_demand + delta;
}

extern void update_cpu_cluster_capacity(const cpumask_t *cpus);

extern unsigned long thermal_cap(int cpu);
@@ -2743,17 +2731,7 @@ static inline int alloc_related_thread_groups(void) { return 0; }
#define trace_sched_cpu_load_cgroup(...)
#define trace_sched_cpu_load_wakeup(...)

static inline bool
__task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
{
	return 0;
}

/* WALT disabled: removing a task's window-demand contribution is a no-op. */
static inline void dec_cum_window_demand(struct rq *rq, struct task_struct *p) { }

/* WALT disabled: adding a task's window-demand contribution is a no-op. */
static inline void inc_cum_window_demand(struct rq *rq, struct task_struct *p, s64 delta) { }
/* WALT disabled: cumulative window demand fixups are no-ops. */
static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 delta) { }

static inline void update_cpu_cluster_capacity(const cpumask_t *cpus) { }

+25 −17
Original line number Diff line number Diff line
@@ -227,6 +227,8 @@ void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,

	fixup_cumulative_runnable_avg(&rq->walt_stats, task_load_delta,
				      pred_demand_delta);

	walt_fixup_cum_window_demand(rq, task_load_delta);
}

/*
@@ -295,13 +297,6 @@ update_window_start(struct rq *rq, u64 wallclock, int event)
	rq->window_start += (u64)nr_windows * (u64)sched_ravg_window;

	rq->cum_window_demand = rq->walt_stats.cumulative_runnable_avg;
	/*
	 * If the window is rolled over when the task is dequeued, this
	 * task demand is not included in cumulative_runnable_avg. So
	 * add it separately to the cumulative window demand.
	 */
	if (!rq->curr->on_rq && event == PUT_PREV_TASK)
		rq->cum_window_demand += rq->curr->ravg.demand;

	return old_window_start;
}
@@ -778,9 +773,15 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)

	update_task_cpu_cycles(p, new_cpu);

	if (__task_in_cum_window_demand(src_rq, p)) {
		dec_cum_window_demand(src_rq, p);
		inc_cum_window_demand(dest_rq, p, p->ravg.demand);
	/*
	 * When a task is migrating during the wakeup, adjust
	 * the task's contribution towards cumulative window
	 * demand.
	 */
	if (p->state == TASK_WAKING && p->last_sleep_ts >=
				       src_rq->window_start) {
		walt_fixup_cum_window_demand(src_rq, -(s64)p->ravg.demand);
		walt_fixup_cum_window_demand(dest_rq, p->ravg.demand);
	}

	new_task = is_new_task(p);
@@ -1658,19 +1659,26 @@ static void update_history(struct rq *rq, struct task_struct *p,
	 * A throttled deadline sched class task gets dequeued without
	 * changing p->on_rq. Since the dequeue decrements walt stats
	 * avoid decrementing it here again.
	 */
	if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
						!p->dl.dl_throttled))
	 *
	 * When window is rolled over, the cumulative window demand
	 * is reset to the cumulative runnable average (contribution from
	 * the tasks on the runqueue). If the current task is dequeued
	 * already, it's demand is not included in the cumulative runnable
	 * average. So add the task demand separately to cumulative window
	 * demand.
	 */
	if (!task_has_dl_policy(p) || !p->dl.dl_throttled) {
		if (task_on_rq_queued(p))
			p->sched_class->fixup_walt_sched_stats(rq, p, demand,
							       pred_demand);
		else if (rq->curr == p)
			walt_fixup_cum_window_demand(rq, demand);
	}

	p->ravg.demand = demand;
	p->ravg.coloc_demand = div64_u64(sum, sched_ravg_hist_size);
	p->ravg.pred_demand = pred_demand;

	if (__task_in_cum_window_demand(rq, p))
		inc_cum_window_demand(rq, p, p->ravg.demand - prev_demand);

done:
	trace_sched_update_history(rq, p, runtime, samples, event);
}
+18 −0
Original line number Diff line number Diff line
@@ -121,6 +121,16 @@ walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)

	fixup_cumulative_runnable_avg(&rq->walt_stats, p->ravg.demand,
				      p->ravg.pred_demand);

	/*
	 * Add a task's contribution to the cumulative window demand when
	 *
	 * (1) task is enqueued with on_rq = 1 i.e migration,
	 *     prio/cgroup/class change.
	 * (2) task is waking for the first time in this window.
	 */
	if (p->on_rq || (p->last_sleep_ts < rq->window_start))
		walt_fixup_cum_window_demand(rq, p->ravg.demand);
}

static inline void
@@ -131,6 +141,14 @@ walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)

	fixup_cumulative_runnable_avg(&rq->walt_stats, -(s64)p->ravg.demand,
				      -(s64)p->ravg.pred_demand);

	/*
	 * on_rq will be 1 for sleeping tasks. So check if the task
	 * is migrating or dequeuing in RUNNING state to change the
	 * prio/cgroup/class.
	 */
	if (task_on_rq_migrating(p) || p->state == TASK_RUNNING)
		walt_fixup_cum_window_demand(rq, -(s64)p->ravg.demand);
}

extern void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,