Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 38d08092 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: walt: fix cumulative window demand update bugs"

parents 81c065e9 bc4cef7b
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -9577,8 +9577,8 @@ void sched_exit(struct task_struct *p)
	wallclock = sched_ktime_clock();
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	dequeue_task(rq, p, 0);
	reset_task_stats(p);
	dec_cum_window_demand(rq, p);
	reset_task_stats(p);
	p->ravg.mark_start = wallclock;
	p->ravg.sum_history[0] = EXITING_TASK_MARKER;
	free_task_load_ptrs(p);
+2 −1
Original line number Diff line number Diff line
@@ -2635,7 +2635,8 @@ static inline void
dec_cum_window_demand(struct rq *rq, struct task_struct *p)
{
	/*
	 * Remove @p's demand contribution from this CPU's cumulative
	 * window demand accounting.
	 *
	 * If the subtraction underflows, the bookkeeping elsewhere is
	 * out of sync (a demand was removed that was never added for
	 * this window).  Clamp to zero rather than letting a huge
	 * bogus value propagate; note the signed (s64) cast — the
	 * field would otherwise never compare below zero if it is an
	 * unsigned type.  TODO confirm cum_window_demand's declared
	 * type against the struct rq definition.
	 */
	rq->cum_window_demand -= p->ravg.demand;
	if (unlikely((s64)rq->cum_window_demand < 0))
		rq->cum_window_demand = 0;
}

static inline void
+6 −1
Original line number Diff line number Diff line
@@ -295,7 +295,12 @@ update_window_start(struct rq *rq, u64 wallclock, int event)
	rq->window_start += (u64)nr_windows * (u64)sched_ravg_window;

	rq->cum_window_demand = rq->walt_stats.cumulative_runnable_avg;
	if (event == PUT_PREV_TASK)
	/*
	 * If the window is rolled over when the task is dequeued, this
	 * task demand is not included in cumulative_runnable_avg. So
	 * add it separately to the cumulative window demand.
	 */
	if (!rq->curr->on_rq && event == PUT_PREV_TASK)
		rq->cum_window_demand += rq->curr->ravg.demand;

	return old_window_start;