Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 351d3fad authored by Pavankumar Kondeti, committed by Todd Kjos
Browse files

ANDROID: sched: WALT: Add support for CFS_BANDWIDTH



The cumulative runnable average is now maintained in each cfs_rq, in
addition to the rq, so that when a cfs_rq is throttled or unthrottled,
that cfs_rq's contribution can be updated at the rq level. Implement the
fixup_cumulative_runnable_avg callback for the fair scheduling class to
handle the cfs_rq cumulative runnable average updates when a runnable
task's demand changes.

Bug: 139071966
Change-Id: Iccd473677cf491920aa82a6fc7e0a5374e5bb27f
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Signed-off-by: Todd Kjos <tkjos@google.com>
parent 1c847afb
Loading
Loading
Loading
Loading
+64 −3
Original line number Diff line number Diff line
@@ -4214,6 +4214,23 @@ static int tg_throttle_down(struct task_group *tg, void *data)
	return 0;
}

#ifdef CONFIG_SCHED_WALT
/*
 * Apply @value to the WALT cumulative runnable average counter at
 * @accumulated, adding when @add is true and subtracting otherwise.
 */
static inline void walt_propagate_cumulative_runnable_avg(u64 *accumulated,
							  u64 value, bool add)
{
	/* Unsigned negation makes the subtract case a single add. */
	*accumulated += add ? value : (u64)-value;
}
#else
/*
 * Nop definition: neither rq nor cfs_rq carries a
 * cumulative_runnable_avg field when WALT is disabled.
 */
#define walt_propagate_cumulative_runnable_avg(...)
#endif

static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);
@@ -4239,13 +4256,21 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
		if (dequeue)
			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
		qcfs_rq->h_nr_running -= task_delta;
		walt_propagate_cumulative_runnable_avg(
				   &qcfs_rq->cumulative_runnable_avg,
				   cfs_rq->cumulative_runnable_avg, false);


		if (qcfs_rq->load.weight)
			dequeue = 0;
	}

	if (!se)
	if (!se) {
		sub_nr_running(rq, task_delta);
		walt_propagate_cumulative_runnable_avg(
				   &rq->cumulative_runnable_avg,
				   cfs_rq->cumulative_runnable_avg, false);
	}

	cfs_rq->throttled = 1;
	cfs_rq->throttled_clock = rq_clock(rq);
@@ -4279,6 +4304,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
	struct sched_entity *se;
	int enqueue = 1;
	long task_delta;
	struct cfs_rq *tcfs_rq __maybe_unused = cfs_rq;

	se = cfs_rq->tg->se[cpu_of(rq)];

@@ -4306,13 +4332,20 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
		if (enqueue)
			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
		cfs_rq->h_nr_running += task_delta;
		walt_propagate_cumulative_runnable_avg(
				   &cfs_rq->cumulative_runnable_avg,
				   tcfs_rq->cumulative_runnable_avg, true);

		if (cfs_rq_throttled(cfs_rq))
			break;
	}

	if (!se)
	if (!se) {
		add_nr_running(rq, task_delta);
		walt_propagate_cumulative_runnable_avg(
				   &rq->cumulative_runnable_avg,
				   tcfs_rq->cumulative_runnable_avg, true);
	}

	/* determine whether we need to wake up potentially idle cpu */
	if (rq->curr == rq->idle && rq->cfs.nr_running)
@@ -4751,6 +4784,30 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
	}
}

#ifdef CONFIG_SCHED_WALT
/*
 * Propagate a change in task @p's WALT demand (to @new_task_load) up
 * the cfs_rq hierarchy, stopping at the first throttled cfs_rq. The
 * rq-level counter is fixed up only when no ancestor was throttled,
 * since a throttled cfs_rq's load is not part of the rq's total.
 */
static void walt_fixup_cumulative_runnable_avg_fair(struct rq *rq,
						    struct task_struct *p,
						    u64 new_task_load)
{
	struct sched_entity *entity = &p->se;
	s64 demand_delta = (s64)new_task_load - p->ravg.demand;

	for_each_sched_entity(entity) {
		struct cfs_rq *gcfs_rq = cfs_rq_of(entity);

		gcfs_rq->cumulative_runnable_avg += demand_delta;
		if (cfs_rq_throttled(gcfs_rq))
			break;
	}

	/* entity is NULL only if the walk reached the root unthrottled. */
	if (!entity)
		walt_fixup_cumulative_runnable_avg(rq, p, new_task_load);
}

#endif /* CONFIG_SCHED_WALT */

#else /* CONFIG_CFS_BANDWIDTH */
static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
{
@@ -4793,6 +4850,9 @@ static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
static inline void update_runtime_enabled(struct rq *rq) {}
static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}

#define walt_fixup_cumulative_runnable_avg_fair \
	walt_fixup_cumulative_runnable_avg

#endif /* CONFIG_CFS_BANDWIDTH */

/**************************************************
@@ -11001,7 +11061,8 @@ const struct sched_class fair_sched_class = {
	.task_change_group	= task_change_group_fair,
#endif
#ifdef CONFIG_SCHED_WALT
	.fixup_cumulative_runnable_avg = walt_fixup_cumulative_runnable_avg,
	.fixup_cumulative_runnable_avg =
		walt_fixup_cumulative_runnable_avg_fair,
#endif
};