Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a05271a0 authored by Vincent Guittot's avatar Vincent Guittot Committed by Chris Redpath
Browse files

FROMLIST: sched/rt: add utilization tracking



schedutil governor relies on cfs_rq's util_avg to choose the OPP when cfs
tasks are running. When the CPU is overloaded by cfs and rt tasks, cfs tasks
are preempted by rt tasks and in this case util_avg reflects the remaining
capacity that is used by cfs tasks but not what cfs tasks want to use. In such
case, schedutil can select a lower OPP when cfs task runs whereas the CPU is
overloaded. In order to have a more accurate view of the utilization of the
CPU, we track the utilization that is used by RT tasks.
DL tasks are not taken into account as they have their own utilization
tracking mechanism.

We don't use rt_avg which doesn't have the same dynamic as PELT and which
can include IRQ time.

Change-Id: Icf36a67ea516b1ab21d9443a70e4b253f248550f
Signed-off-by: default avatarVincent Guittot <vincent.guittot@linaro.org>
[backported from list: http://marc.info/?l=linux-kernel&m=149786230529220]
Signed-off-by: default avatarJuri Lelli <juri.lelli@arm.com>
Signed-off-by: default avatarChris Redpath <chris.redpath@arm.com>
parent da2ceceb
Loading
Loading
Loading
Loading
+25 −6
Original line number Diff line number Diff line
@@ -2967,7 +2967,8 @@ accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
 */
static __always_inline int
___update_load_avg(u64 now, int cpu, struct sched_avg *sa,
		  unsigned long weight, int running, struct cfs_rq *cfs_rq)
		  unsigned long weight, int running, struct cfs_rq *cfs_rq,
		  struct rt_rq *rt_rq)
{
	u64 delta;

@@ -3025,8 +3026,10 @@ ___update_load_avg(u64 now, int cpu, struct sched_avg *sa,

	if (cfs_rq)
		trace_sched_load_cfs_rq(cfs_rq);
	else
	else {
		if (likely(!rt_rq))
			trace_sched_load_se(container_of(sa, struct sched_entity, avg));
	}

	return 1;
}
@@ -3034,7 +3037,7 @@ ___update_load_avg(u64 now, int cpu, struct sched_avg *sa,
static int
__update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
{
	return ___update_load_avg(now, cpu, &se->avg, 0, 0, NULL);
	return ___update_load_avg(now, cpu, &se->avg, 0, 0, NULL, NULL);
}

static int
@@ -3042,7 +3045,7 @@ __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entit
{
	return ___update_load_avg(now, cpu, &se->avg,
				  se->on_rq * scale_load_down(se->load.weight),
				  cfs_rq->curr == se, NULL);
				  cfs_rq->curr == se, NULL, NULL);
}

static int
@@ -3050,7 +3053,7 @@ __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
{
	return ___update_load_avg(now, cpu, &cfs_rq->avg,
			scale_load_down(cfs_rq->load.weight),
			cfs_rq->curr != NULL, cfs_rq);
			cfs_rq->curr != NULL, cfs_rq, NULL);
}

/*
@@ -3390,6 +3393,15 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
	return decayed || removed_load;
}

/*
 * Drive PELT accounting for an rt_rq: fold the elapsed time into
 * rt_rq->avg via the common ___update_load_avg() helper.  Weight is 0
 * (rt load is not tracked, only utilization); @running says whether an
 * rt task is currently on the CPU.  Returns nonzero when the average
 * was decayed, mirroring the cfs_rq helpers above.
 */
int update_rt_rq_load_avg(u64 now, int cpu, struct rt_rq *rt_rq, int running)
{
	return ___update_load_avg(now, cpu, &rt_rq->avg, 0, running, NULL, rt_rq);
}

/*
 * Optional action to be done while updating the load average
 */
@@ -3565,6 +3577,11 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
	return 0;
}

/*
 * Stub for builds without PELT rt tracking — NOTE(review): appears to be
 * the !CONFIG_SMP branch, confirm against the full file.  Reports "no
 * decay" so callers take no action.
 */
int update_rt_rq_load_avg(u64 now, int cpu, struct rt_rq *rt_rq, int running)
{
	return 0;
}

#define UPDATE_TG	0x0
#define SKIP_AGE_LOAD	0x0

@@ -7480,6 +7497,7 @@ static void update_blocked_averages(int cpu)
		if (cfs_rq_is_decayed(cfs_rq))
			list_del_leaf_cfs_rq(cfs_rq);
	}
	update_rt_rq_load_avg(rq_clock_task(rq), cpu, &rq->rt, 0);
#ifdef CONFIG_NO_HZ_COMMON
	rq->last_blocked_load_update_tick = jiffies;
#endif
@@ -7542,6 +7560,7 @@ static inline void update_blocked_averages(int cpu)
	rq_lock_irqsave(rq, &rf);
	update_rq_clock(rq);
	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
	update_rt_rq_load_avg(rq_clock_task(rq), cpu, &rq->rt, 0);
#ifdef CONFIG_NO_HZ_COMMON
	rq->last_blocked_load_update_tick = jiffies;
#endif
+9 −0
Original line number Diff line number Diff line
@@ -1536,6 +1536,8 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
	return p;
}

extern int update_rt_rq_load_avg(u64 now, int cpu, struct rt_rq *rt_rq, int running);

static struct task_struct *
pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
@@ -1581,6 +1583,10 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)

	queue_push_tasks(rq);

	if (p)
		update_rt_rq_load_avg(rq_clock_task(rq), cpu_of(rq), rt_rq,
					rq->curr->sched_class == &rt_sched_class);

	return p;
}

@@ -1588,6 +1594,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	update_rt_rq_load_avg(rq_clock_task(rq), cpu_of(rq), &rq->rt, 1);

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
@@ -2370,6 +2378,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	update_rt_rq_load_avg(rq_clock_task(rq), cpu_of(rq), &rq->rt, 1);

	watchdog(rq, p);

+4 −0
Original line number Diff line number Diff line
@@ -524,6 +524,9 @@ struct rt_rq {
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;

	struct sched_avg avg;

#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
@@ -1682,6 +1685,7 @@ static inline int hrtick_enabled(struct rq *rq)

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);
extern unsigned long sched_get_rt_rq_util(int cpu);

#ifndef arch_scale_freq_capacity
static __always_inline