Commit 25de4daf authored by Chengming Zhou, committed by Lee Jones

UPSTREAM: sched/fair: Fix cfs_rq_clock_pelt() for throttled cfs_rq



Since commit 23127296889f ("sched/fair: Update scale invariance of PELT")
changed PELT updates to use rq_clock_pelt() instead of rq_clock_task(),
the throttled_clock_task and throttled_clock_task_time accounting should
use rq_clock_pelt() as well, so that cfs_rq_clock_pelt() is correct for a
throttled cfs_rq. Rename throttled_clock_task(_time) to
throttled_clock_pelt(_time) accordingly.
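
To illustrate why mixing the two clocks breaks the accounting, here is a
minimal user-space sketch (not kernel code; the struct and function names
are illustrative stand-ins for the kernel's rq/cfs_rq fields). The task
clock and the PELT clock drift apart whenever the CPU runs below full
frequency or capacity, so a throttled span measured with rq_clock_task()
cannot be subtracted from rq_clock_pelt():

#include <stdio.h>
#include <stdint.h>

struct model_rq {
	uint64_t clock_task;	/* stand-in for rq_clock_task() */
	uint64_t clock_pelt;	/* stand-in for rq_clock_pelt() */
};

struct model_cfs_rq {
	int	 throttle_count;
	uint64_t throttled_clock_pelt;	    /* when throttling started */
	uint64_t throttled_clock_pelt_time; /* total time spent throttled */
};

/* What cfs_rq_clock_pelt() computes: PELT time minus throttled time. */
static uint64_t model_cfs_rq_clock_pelt(struct model_rq *rq,
					struct model_cfs_rq *cfs)
{
	if (cfs->throttle_count)
		return cfs->throttled_clock_pelt - cfs->throttled_clock_pelt_time;
	return rq->clock_pelt - cfs->throttled_clock_pelt_time;
}

int main(void)
{
	struct model_rq rq = { .clock_task = 1000, .clock_pelt = 1000 };
	struct model_cfs_rq cfs = { 0 };
	uint64_t throttled_clock_task;

	/* Throttle: both variants stamp a start time (tg_throttle_down),
	 * but the old code sampled the task clock. */
	throttled_clock_task     = rq.clock_task;  /* old code */
	cfs.throttled_clock_pelt = rq.clock_pelt;  /* this patch */
	cfs.throttle_count++;

	/* While throttled, the CPU runs at half speed, so the PELT clock
	 * advances half as fast as the task clock. */
	rq.clock_task += 100;
	rq.clock_pelt += 50;

	/* Unthrottle (tg_unthrottle_up): accumulate the throttled span. */
	cfs.throttle_count--;
	uint64_t buggy_time = rq.clock_task - throttled_clock_task;		  /* 100 */
	cfs.throttled_clock_pelt_time = rq.clock_pelt - cfs.throttled_clock_pelt; /* 50 */

	/* The cfs_rq's PELT clock was 1000 when throttling began and must
	 * stay frozen across the throttle. */
	printf("old code: %llu\n", (unsigned long long)(rq.clock_pelt - buggy_time));	  /* 950 */
	printf("patched:  %llu\n", (unsigned long long)model_cfs_rq_clock_pelt(&rq, &cfs)); /* 1000 */
	return 0;
}

With the patched accounting the cfs_rq's PELT clock is exactly frozen for
the throttled span; with the old accounting it jumps backwards by however
far the two clocks diverged while the group was throttled.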

Bug: 254441685
Fixes: 23127296889f ("sched/fair: Update scale invariance of PELT")
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ben Segall <bsegall@google.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20220408115309.81603-1-zhouchengming@bytedance.com


(cherry picked from commit 64eaf50731ac0a8c76ce2fedd50ef6652aabc5ff)
Signed-off-by: Lee Jones <joneslee@google.com>
Change-Id: I61e971d09f14708b8ee170fd5d5109144bba6e34
parent 1f9d7265
kernel/sched/fair.c +6 −6
@@ -4475,9 +4475,9 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 {
 	if (unlikely(cfs_rq->throttle_count))
-		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
+		return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;
 
-	return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
+	return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
 }
 
 /* returns 0 on failure to allocate runtime */
@@ -4572,8 +4572,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
 	cfs_rq->throttle_count--;
 	if (!cfs_rq->throttle_count) {
 		/* adjust cfs_rq_clock_task() */
-		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
-					     cfs_rq->throttled_clock_task;
+		cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
+					     cfs_rq->throttled_clock_pelt;
 
 		/* Add cfs_rq with already running entity in the list */
 		if (cfs_rq->nr_running >= 1)
@@ -4590,7 +4590,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
 
 	/* group is entering throttled state, stop time */
 	if (!cfs_rq->throttle_count) {
-		cfs_rq->throttled_clock_task = rq_clock_task(rq);
+		cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
 		list_del_leaf_cfs_rq(cfs_rq);
 	}
 	cfs_rq->throttle_count++;
@@ -4973,7 +4973,7 @@ static void sync_throttle(struct task_group *tg, int cpu)
 	pcfs_rq = tg->parent->cfs_rq[cpu];
 
 	cfs_rq->throttle_count = pcfs_rq->throttle_count;
-	cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
+	cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
 }
 
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
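
The throttle_count checks in the tg_throttle_down() and tg_unthrottle_up()
hunks above make the bookkeeping robust against overlapping throttles in a
cgroup hierarchy (e.g. a group and one of its ancestors both throttled):
only the 0 -> 1 transition stamps the start time and only the 1 -> 0
transition accumulates, so a span is counted exactly once. A hedged sketch
of that guard (illustrative names, not kernel code):

#include <stdio.h>
#include <stdint.h>

static uint64_t clock_pelt;		/* stand-in for rq_clock_pelt(rq) */
static int throttle_count;
static uint64_t throttled_clock_pelt;
static uint64_t throttled_clock_pelt_time;

static void throttle_down(void)		/* models tg_throttle_down() */
{
	if (!throttle_count)		/* stop time only on the first throttle */
		throttled_clock_pelt = clock_pelt;
	throttle_count++;
}

static void unthrottle_up(void)		/* models tg_unthrottle_up() */
{
	throttle_count--;
	if (!throttle_count)		/* accumulate only when fully unthrottled */
		throttled_clock_pelt_time += clock_pelt - throttled_clock_pelt;
}

int main(void)
{
	throttle_down();	/* ancestor group throttled */
	clock_pelt += 30;
	throttle_down();	/* group throttled too: no second stamp */
	clock_pelt += 20;
	unthrottle_up();	/* group released, ancestor still throttled */
	clock_pelt += 10;
	unthrottle_up();	/* last release accumulates the whole span */
	printf("throttled for %llu\n",
	       (unsigned long long)throttled_clock_pelt_time);	/* 60 */
	return 0;
}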
kernel/sched/pelt.h +2 −2
@@ -127,9 +127,9 @@ static inline u64 rq_clock_pelt(struct rq *rq)
 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
 {
 	if (unlikely(cfs_rq->throttle_count))
-		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
+		return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;
 
-	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
+	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
 }
 #else
 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
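
For context, the reason the two clocks diverge at all: the PELT clock
advances task-clock deltas scaled down by the current frequency and CPU
capacity (cf. update_rq_clock_pelt() in kernel/sched/pelt.c), so it runs
slower than rq_clock_task() whenever the CPU is not at full speed. A
simplified sketch of that scaling, assuming both scale factors are on the
kernel's 0..1024 capacity scale (SCHED_CAPACITY_SHIFT = 10):

#include <stdint.h>

/* Simplified model of how a task-clock delta is shrunk before being
 * added to the PELT clock; at half frequency on a full-capacity CPU a
 * 100-unit task-clock delta advances the PELT clock by only 50. */
static inline uint64_t pelt_scale_delta(uint64_t delta,
					unsigned long freq_scale, /* 0..1024 */
					unsigned long cpu_scale)  /* 0..1024 */
{
	delta = (delta * freq_scale) >> 10;	/* like cap_scale(delta, freq) */
	delta = (delta * cpu_scale) >> 10;	/* like cap_scale(delta, capacity) */
	return delta;
}

That divergence is exactly what the throttled-time bookkeeping must track
in the same clock domain it is later subtracted from.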
kernel/sched/sched.h +2 −2
@@ -575,8 +575,8 @@ struct cfs_rq {
 	s64			runtime_remaining;
 
 	u64			throttled_clock;
-	u64			throttled_clock_task;
-	u64			throttled_clock_task_time;
+	u64			throttled_clock_pelt;
+	u64			throttled_clock_pelt_time;
 	int			throttled;
 	int			throttle_count;
 	struct list_head	throttled_list;