Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d7bca8f3, authored by Linux Build Service Account and committed by Gerrit (the friendly Code Review server)
Browse files

Merge "sched: window_stats: Add "disable" mode support"

parents bf7b7293 b432b691
Loading
Loading
Loading
Loading
+18 −4
Original line number Diff line number Diff line
@@ -1156,6 +1156,9 @@ __read_mostly unsigned int sched_ravg_window = 10000000;
/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
unsigned int __read_mostly sched_use_pelt;

/* Temporarily disable window-stats activity on all cpus */
unsigned int __read_mostly sched_disable_window_stats;

unsigned int max_possible_efficiency = 1024;
unsigned int min_possible_efficiency = 1024;

@@ -1393,7 +1396,7 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
	u64 window_start;
	s64 delta = 0;

	if (sched_use_pelt || !rq->window_start)
	if (sched_use_pelt || !rq->window_start || sched_disable_window_stats)
		return;

	lockdep_assert_held(&rq->lock);
@@ -1594,6 +1597,9 @@ static inline void mark_task_starting(struct task_struct *p)
	struct rq *rq = task_rq(p);
	u64 wallclock = sched_clock();

	if (sched_disable_window_stats)
		return;

	if (!rq->window_start) {
		p->ravg.partial_demand = 0;
		p->ravg.demand = 0;
@@ -1698,9 +1704,11 @@ void sched_exit(struct task_struct *p)
	/* rq->curr == p */
	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
	dequeue_task(rq, p, 0);
	if (p->ravg.flags & CURR_WINDOW_CONTRIB)
	if (!sched_disable_window_stats &&
			(p->ravg.flags & CURR_WINDOW_CONTRIB))
		rq->curr_runnable_sum -= p->ravg.partial_demand;
	if (p->ravg.flags & PREV_WINDOW_CONTRIB)
	if (!sched_disable_window_stats &&
			(p->ravg.flags & PREV_WINDOW_CONTRIB))
		rq->prev_runnable_sum -= p->ravg.demand;
	BUG_ON((s64)rq->curr_runnable_sum < 0);
	BUG_ON((s64)rq->prev_runnable_sum < 0);
@@ -2000,10 +2008,15 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
	struct rq *src_rq = task_rq(p);
	struct rq *dest_rq = cpu_rq(new_cpu);
	u64 wallclock;
	int freq_notify = 0;

	if (p->state == TASK_WAKING)
		double_rq_lock(src_rq, dest_rq);

	if (sched_disable_window_stats)
		goto done;

	freq_notify = 1;
	wallclock = sched_clock();

	update_task_ravg(task_rq(p)->curr, task_rq(p),
@@ -2055,10 +2068,11 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
	trace_sched_migration_update_sum(src_rq);
	trace_sched_migration_update_sum(dest_rq);

done:
	if (p->state == TASK_WAKING)
		double_rq_unlock(src_rq, dest_rq);

	if (cpumask_test_cpu(new_cpu,
	if (!freq_notify && cpumask_test_cpu(new_cpu,
			     &src_rq->freq_domain_cpumask))
		return;

+2 −2
Original line number Diff line number Diff line
@@ -1820,7 +1820,7 @@ done:

void inc_nr_big_small_task(struct rq *rq, struct task_struct *p)
{
	if (!sched_enable_hmp)
	if (!sched_enable_hmp || sched_disable_window_stats)
		return;

	if (is_big_task(p))
@@ -1831,7 +1831,7 @@ void inc_nr_big_small_task(struct rq *rq, struct task_struct *p)

void dec_nr_big_small_task(struct rq *rq, struct task_struct *p)
{
	if (!sched_enable_hmp)
	if (!sched_enable_hmp || sched_disable_window_stats)
		return;

	if (is_big_task(p))
+3 −2
Original line number Diff line number Diff line
@@ -689,6 +689,7 @@ extern void init_new_task_load(struct task_struct *p);

extern unsigned int sched_ravg_window;
extern unsigned int sched_use_pelt;
extern unsigned int sched_disable_window_stats;
extern unsigned int max_possible_freq;
extern unsigned int min_max_freq;
extern unsigned int pct_task_load(struct task_struct *p);
@@ -718,7 +719,7 @@ inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
	if (sched_use_pelt)
		rq->cumulative_runnable_avg +=
				p->se.avg.runnable_avg_sum_scaled;
	else
	else if (!sched_disable_window_stats)
		rq->cumulative_runnable_avg += p->ravg.demand;
}

@@ -728,7 +729,7 @@ dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
	if (sched_use_pelt)
		rq->cumulative_runnable_avg -=
				p->se.avg.runnable_avg_sum_scaled;
	else
	else if (!sched_disable_window_stats)
		rq->cumulative_runnable_avg -= p->ravg.demand;
	BUG_ON((s64)rq->cumulative_runnable_avg < 0);
}