
Commit af833ae6 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "sched: Track burst length for tasks"

parents 1906639e f8c7c6ff
+1 −0
@@ -1395,6 +1395,7 @@ struct ravg {
	u32 sum_history[RAVG_HIST_SIZE_MAX];
	u32 *curr_window_cpu, *prev_window_cpu;
	u32 curr_window, prev_window;
+	u64 curr_burst, avg_burst;
	u16 active_windows;
	u32 pred_demand;
	u8 busy_buckets[NUM_BUSY_BUCKETS];
+4 −2
@@ -134,6 +134,7 @@ TRACE_EVENT(sched_task_load,
		__field(	int,	best_cpu		)
		__field(	u64,	latency			)
		__field(	int,	grp_id			)
+		__field(	u64,	avg_burst		)
	),

	TP_fast_assign(
@@ -150,13 +151,14 @@ TRACE_EVENT(sched_task_load,
						      sched_ktime_clock() -
						      p->ravg.mark_start : 0;
		__entry->grp_id		= p->grp ? p->grp->id : 0;
+		__entry->avg_burst	= p->ravg.avg_burst;
	),

-	TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x grp=%d best_cpu=%d latency=%llu",
+	TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x grp=%d best_cpu=%d latency=%llu avg_burst=%llu",
		__entry->pid, __entry->comm, __entry->demand,
		__entry->boost, __entry->reason, __entry->sync,
		__entry->need_idle, __entry->flags, __entry->grp_id,
-		__entry->best_cpu, __entry->latency)
+		__entry->best_cpu, __entry->latency, __entry->avg_burst)
);

TRACE_EVENT(sched_set_preferred_cluster,
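
For illustration only: with the extra field, a sched_task_load line in the trace buffer now ends with avg_burst. Following the updated TP_printk() format above, an emitted line might look like this (all field values hypothetical):

1234 (foo): demand=10000000 boost=0 reason=0 sync=0 need_idle=0 flags=0 grp=0 best_cpu=4 latency=150000 avg_burst=2500000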
+3 −1
@@ -1723,7 +1723,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
	return cpu;
}

-static void update_avg(u64 *avg, u64 sample)
+void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff >> 3;
@@ -3536,6 +3536,8 @@ static void __sched notrace __schedule(bool preempt)
	if (likely(prev != next)) {
		update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
		update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
+		if (!is_idle_task(prev) && !prev->on_rq)
+			update_avg_burst(prev);

		rq->nr_switches++;
		rq->curr = next;
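
Two things happen in core.c: update_avg() loses its static qualifier so the HMP code can reuse it, and __schedule() folds a task's burst into its average only when the previous task is neither the idle task nor still on the runqueue, i.e. when it blocks rather than being preempted. update_avg() itself is an exponentially weighted moving average that gives each new sample a 1/8 weight (the diff >> 3). A minimal standalone sketch of the same arithmetic, with hypothetical sample values (ewma_update is an illustrative name, not the kernel function):

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as update_avg() above: avg += (sample - avg) / 8 */
static void ewma_update(uint64_t *avg, uint64_t sample)
{
	int64_t diff = sample - *avg;

	*avg += diff >> 3;
}

int main(void)
{
	uint64_t avg = 0;
	int i;

	/* Feeding a constant 8 ms (8000000 ns) burst: each sample closes
	 * roughly 1/8 of the remaining gap, so avg converges on 8 ms. */
	for (i = 0; i < 5; i++) {
		ewma_update(&avg, 8000000);
		printf("after sample %d: avg=%llu ns\n", i + 1,
		       (unsigned long long)avg);
	}
	return 0;
}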
+44 −11
@@ -1552,6 +1552,8 @@ void init_new_task_load(struct task_struct *p, bool idle_task)
	INIT_LIST_HEAD(&p->grp_list);
	memset(&p->ravg, 0, sizeof(struct ravg));
	p->cpu_cycles = 0;
+	p->ravg.curr_burst = 0;
+	p->ravg.avg_burst = 0;

	p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
	p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
@@ -2738,12 +2740,14 @@ done:
	trace_sched_update_history(rq, p, runtime, samples, event);
}

-static void add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
+static u64 add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
{
	delta = scale_exec_time(delta, rq);
	p->ravg.sum += delta;
	if (unlikely(p->ravg.sum > sched_ravg_window))
		p->ravg.sum = sched_ravg_window;
+
+	return delta;
}

/*
@@ -2796,13 +2800,14 @@ static void add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
 * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
 * depends on it!
 */
-static void update_task_demand(struct task_struct *p, struct rq *rq,
+static u64 update_task_demand(struct task_struct *p, struct rq *rq,
			       int event, u64 wallclock)
{
	u64 mark_start = p->ravg.mark_start;
	u64 delta, window_start = rq->window_start;
	int new_window, nr_full_windows;
	u32 window_size = sched_ravg_window;
+	u64 runtime;

	new_window = mark_start < window_start;
	if (!account_busy_for_task_demand(p, event)) {
@@ -2816,7 +2821,7 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
			 * it is not necessary to account those.
			 */
			update_history(rq, p, p->ravg.sum, 1, event);
-		return;
+		return 0;
	}

	if (!new_window) {
@@ -2824,8 +2829,7 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
		 * The simple case - busy time contained within the existing
		 * window.
		 */
-		add_to_task_demand(rq, p, wallclock - mark_start);
-		return;
+		return add_to_task_demand(rq, p, wallclock - mark_start);
	}

	/*
@@ -2837,13 +2841,16 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
	window_start -= (u64)nr_full_windows * (u64)window_size;

	/* Process (window_start - mark_start) first */
-	add_to_task_demand(rq, p, window_start - mark_start);
+	runtime = add_to_task_demand(rq, p, window_start - mark_start);

	/* Push new sample(s) into task's demand history */
	update_history(rq, p, p->ravg.sum, 1, event);
-	if (nr_full_windows)
-		update_history(rq, p, scale_exec_time(window_size, rq),
-			       nr_full_windows, event);
+	if (nr_full_windows) {
+		u64 scaled_window = scale_exec_time(window_size, rq);
+
+		update_history(rq, p, scaled_window, nr_full_windows, event);
+		runtime += nr_full_windows * scaled_window;
+	}

	/*
	 * Roll window_start back to current to process any remainder
@@ -2853,13 +2860,31 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,

	/* Process (wallclock - window_start) next */
	mark_start = window_start;
-	add_to_task_demand(rq, p, wallclock - mark_start);
+	runtime += add_to_task_demand(rq, p, wallclock - mark_start);
+
+	return runtime;
}

+static inline void
+update_task_burst(struct task_struct *p, struct rq *rq, int event, int runtime)
+{
+	/*
+	 * update_task_demand() has checks for idle task and
+	 * exit task. The runtime may include the wait time,
+	 * so update the burst only for the cases where the
+	 * task is running.
+	 */
+	if (event == PUT_PREV_TASK || (event == TASK_UPDATE &&
+				rq->curr == p))
+		p->ravg.curr_burst += runtime;
+}
+
/* Reflect task activity on its demand and cpu's busy time statistics */
void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
						u64 wallclock, u64 irqtime)
{
+	u64 runtime;
+
	if (!rq->window_start || sched_disable_window_stats ||
	    p->ravg.mark_start == wallclock)
		return;
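
As a sanity check on the new return value: update_task_demand() above now adds up three pieces of (frequency-scaled) execution time, namely the remainder of the window in which the task started running, any full windows in between, and the portion of the current window. Under the simplifying assumption that scale_exec_time() is the identity, hypothetical numbers work out as follows:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical timeline, in nanoseconds; 20 ms ravg windows. */
	uint64_t window_size  = 20000000;
	uint64_t mark_start   = 5000000;		/* 5 ms into window 0 */
	uint64_t window_start = 3 * window_size;	/* current window base */
	uint64_t wallclock    = window_start + 7000000;	/* 7 ms into window 3 */

	/* Mirror the three accounting steps of update_task_demand(). */
	uint64_t delta   = window_start - mark_start;		/* 55 ms */
	uint64_t nr_full = delta / window_size;			/* 2 full windows */
	uint64_t rolled  = window_start - nr_full * window_size;/* 20 ms */

	uint64_t runtime = rolled - mark_start;		/* 15 ms head */
	runtime += nr_full * window_size;		/* + 40 ms full windows */
	runtime += wallclock - window_start;		/* + 7 ms tail */

	printf("runtime=%llu ns\n", (unsigned long long)runtime); /* 62000000 */
	return 0;
}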
@@ -2874,7 +2899,9 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
	}

	update_task_rq_cpu_cycles(p, rq, event, wallclock, irqtime);
-	update_task_demand(p, rq, event, wallclock);
+	runtime = update_task_demand(p, rq, event, wallclock);
+	if (runtime)
+		update_task_burst(p, rq, event, runtime);
	update_cpu_busy_time(p, rq, event, wallclock, irqtime);
	update_task_pred_demand(rq, p, event);
done:
@@ -4462,6 +4489,12 @@ bool early_detection_notify(struct rq *rq, u64 wallclock)
	return 0;
}

+void update_avg_burst(struct task_struct *p)
+{
+	update_avg(&p->ravg.avg_burst, p->ravg.curr_burst);
+	p->ravg.curr_burst = 0;
+}
+
#ifdef CONFIG_CGROUP_SCHED
u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
					  struct cftype *cft)
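
Taken together: curr_burst grows by the returned runtime for as long as the task keeps executing (PUT_PREV_TASK, or TASK_UPDATE while it is rq->curr), and when the task finally blocks, __schedule() calls update_avg_burst() to fold the finished burst into avg_burst and reset the counter. A minimal userspace model of that lifecycle (names and values are illustrative only, not kernel API):

#include <stdio.h>
#include <stdint.h>

struct burst_model {
	uint64_t curr_burst;	/* execution time in the current burst */
	uint64_t avg_burst;	/* EWMA of completed burst lengths     */
};

/* Task ran for 'runtime' ns and stays runnable: the burst continues. */
static void task_ran(struct burst_model *b, uint64_t runtime)
{
	b->curr_burst += runtime;
}

/* Task blocks: fold the finished burst into the average, then reset. */
static void task_slept(struct burst_model *b)
{
	int64_t diff = b->curr_burst - b->avg_burst;

	b->avg_burst += diff >> 3;	/* same 1/8 weight as update_avg() */
	b->curr_burst = 0;
}

int main(void)
{
	struct burst_model b = { 0, 0 };

	task_ran(&b, 4000000);	/* 4 ms, then preempted: burst continues */
	task_ran(&b, 4000000);	/* 4 ms more, then the task blocks */
	task_slept(&b);		/* avg_burst = 8 ms / 8 = 1 ms */
	printf("avg_burst=%llu ns\n", (unsigned long long)b.avg_burst);
	return 0;
}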
+4 −0
@@ -1137,6 +1137,8 @@ extern int update_preferred_cluster(struct related_thread_group *grp,
extern void set_preferred_cluster(struct related_thread_group *grp);
extern void add_new_task_to_grp(struct task_struct *new);
extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
+extern void update_avg_burst(struct task_struct *p);
+extern void update_avg(u64 *avg, u64 sample);

enum sched_boost_policy {
	SCHED_BOOST_NONE,
@@ -1675,6 +1677,8 @@ static inline int alloc_related_thread_groups(void) { return 0; }
#define trace_sched_cpu_load_cgroup(...)
#define trace_sched_cpu_load_wakeup(...)

+static inline void update_avg_burst(struct task_struct *p) {}
+
#endif	/* CONFIG_SCHED_HMP */

/*