Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit eb7bc528 authored by Joonwoo Park's avatar Joonwoo Park
Browse files

sched: Remove the sched heavy task frequency guidance feature



This has always been an unused feature given its limitation of adding
phantom load to the system. Since there are no immediate plans of
using this and the fact that it adds unnecessary complications to
the new load fixup mechanism, remove this feature for now. It can
be revisited later in light of the new mechanism.

Change-Id: Ie9501a898d0f423338293a8dde6bc56f493f1e75
Signed-off-by: default avatarSyed Rameez Mustafa <rameezmustafa@codeaurora.org>
Signed-off-by: default avatarJoonwoo Park <joonwoop@codeaurora.org>
parent 49fe8ea1
Loading
Loading
Loading
Loading
+9 −20
Original line number Diff line number Diff line
@@ -1123,19 +1123,8 @@ exceeds sched_freq_dec_notify, where freq_required is the frequency calculated
by scheduler to meet current task demand. Note that sched_freq_dec_notify is
specified in kHz units.

*** 7.12 sched_heavy_task

Appears at: /proc/sys/kernel/sched_heavy_task

Default value: 0

This tunable can be used to specify a demand value for tasks above which task
are classified as "heavy" tasks. Task's ravg.demand attribute is used for this
comparison. Scheduler will request a raise in cpu frequency when heavy tasks
wakeup after at least one window of sleep, where window size is defined by
sched_ravg_window. Value 0 will disable this feature.

*** 7.13 sched_cpu_high_irqload
*** 7.12 sched_cpu_high_irqload

Appears at: /proc/sys/kernel/sched_cpu_high_irqload

@@ -1153,7 +1142,7 @@ longer eligible for placement. This will affect the task placement logic
described above, causing the scheduler to try and steer tasks away from
the CPU.

*** 7.14 cpu.upmigrate_discourage
*** 7.13 cpu.upmigrate_discourage

Default value : 0

@@ -1169,7 +1158,7 @@ overcommitted scenario. See notes on sched_spill_nr_run and sched_spill_load for
how overcommitment threshold is defined and also notes on
'sched_upmigrate_min_nice' tunable.

*** 7.15 sched_static_cpu_pwr_cost
*** 7.14 sched_static_cpu_pwr_cost

Default value: 0

@@ -1184,7 +1173,7 @@ within a cluster and possibly have differing value between clusters as
needed.


*** 7.16 sched_static_cluster_pwr_cost
*** 7.15 sched_static_cluster_pwr_cost

Default value: 0

@@ -1195,7 +1184,7 @@ power mode. It ignores the actual D-state that a cluster may be in and assumes
the worst case power cost of the highest D-state. It is a means of biasing task
placement away from idle clusters when necessary.

*** 7.17 sched_early_detection_duration
*** 7.16 sched_early_detection_duration

Default value: 9500000

@@ -1206,7 +1195,7 @@ tick for it to be eligible for the scheduler's early detection feature
under scheduler boost. For more information on the feature itself please
refer to section 5.2.1.

*** 7.18 sched_restrict_cluster_spill
*** 7.17 sched_restrict_cluster_spill

Default value: 0

@@ -1225,7 +1214,7 @@ CPU across all clusters. When this tunable is enabled, the RT tasks are
restricted to the lowest possible power cluster.


*** 7.19 sched_downmigrate
*** 7.18 sched_downmigrate

Appears at: /proc/sys/kernel/sched_downmigrate

@@ -1238,7 +1227,7 @@ its demand *in reference to the power-efficient cpu* drops less than 60%
(sched_downmigrate).


*** 7.20 sched_small_wakee_task_load
*** 7.19 sched_small_wakee_task_load

Appears at: /proc/sys/kernel/sched_small_wakee_task_load

@@ -1250,7 +1239,7 @@ categorized as small wakee tasks. Scheduler places small wakee tasks on the
waker's cluster.


*** 7.21 sched_big_waker_task_load
*** 7.20 sched_big_waker_task_load

Appears at: /proc/sys/kernel/sched_big_waker_task_load

+1 −1
Original line number Diff line number Diff line
@@ -44,7 +44,6 @@ extern unsigned int sysctl_sched_wakeup_load_threshold;
extern unsigned int sysctl_sched_window_stats_policy;
extern unsigned int sysctl_sched_ravg_hist_size;
extern unsigned int sysctl_sched_cpu_high_irqload;
extern unsigned int sysctl_sched_heavy_task_pct;

#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
extern unsigned int sysctl_sched_init_task_load_pct;
@@ -75,6 +74,7 @@ extern unsigned int sysctl_sched_account_wait_time;
extern unsigned int sysctl_sched_freq_account_wait_time;
extern unsigned int sysctl_sched_enable_power_aware;
extern unsigned int sysctl_sched_migration_fixup;
extern unsigned int sysctl_sched_heavy_task_pct;
#else
extern unsigned int sysctl_sched_select_prev_cpu_us;
extern unsigned int sysctl_sched_enable_colocation;
+0 −44
Original line number Diff line number Diff line
@@ -2082,22 +2082,6 @@ static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
	return SCHED_FREQ_ACCOUNT_WAIT_TIME;
}

static inline int
heavy_task_wakeup(struct task_struct *p, struct rq *rq, int event)
{
	u32 task_demand = p->ravg.demand;

	if (!sched_heavy_task || event != TASK_WAKE ||
	    task_demand < sched_heavy_task || exiting_task(p))
		return 0;

	if (p->ravg.mark_start > rq->window_start)
		return 0;

	/* has a full window elapsed since task slept? */
	return (rq->window_start - p->ravg.mark_start > sched_ravg_window);
}

static inline bool is_new_task(struct task_struct *p)
{
	return p->ravg.active_windows < sysctl_sched_new_task_windows;
@@ -2451,18 +2435,6 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
		if (p_is_curr_task) {
			/* p is idle task */
			BUG_ON(p != rq->idle);
		} else if (heavy_task_wakeup(p, rq, event)) {
			/* A new window has started. If p is a waking
			 * heavy task its prev_window contribution is faked
			 * to be its window-based demand. Note that this can
			 * introduce phantom load into the system depending
			 * on the window policy and task behavior. This feature
			 * can be controlled via the sched_heavy_task
			 * tunable. */
			p->ravg.prev_window = p->ravg.demand;
			*prev_runnable_sum += p->ravg.demand;
			if (new_task)
				*nt_prev_runnable_sum += p->ravg.demand;
		}

		return;
@@ -3611,12 +3583,6 @@ done:

static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }

static inline int
heavy_task_wakeup(struct task_struct *p, struct rq *rq, int event)
{
	return 0;
}

#endif	/* CONFIG_SCHED_FREQ_INPUT */

#define sched_up_down_migrate_auto_update 1
@@ -4307,12 +4273,6 @@ static inline int update_preferred_cluster(struct related_thread_group *grp,

static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }

static inline int
heavy_task_wakeup(struct task_struct *p, struct rq *rq, int event)
{
	return 0;
}

static struct cpu_cycle
update_task_ravg(struct task_struct *p, struct rq *rq,
			 int event, u64 wallclock, u64 irqtime)
@@ -4992,7 +4952,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
	int cpu, src_cpu, success = 0;
	int notify = 0;
	struct migration_notify_data mnd;
	int heavy_task = 0;
#ifdef CONFIG_SMP
	unsigned int old_load;
	struct rq *rq;
@@ -5040,7 +4999,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
	old_load = task_load(p);
	wallclock = sched_ktime_clock();
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	heavy_task = heavy_task_wakeup(p, rq, TASK_WAKE);
	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
	raw_spin_unlock(&rq->lock);

@@ -5102,8 +5060,6 @@ out:
						false, check_group);
			check_for_freq_change(cpu_rq(src_cpu),
						false, check_group);
		} else if (heavy_task) {
			check_for_freq_change(cpu_rq(cpu), false, false);
		} else if (success) {
			check_for_freq_change(cpu_rq(cpu), true, false);
		}
+0 −14
Original line number Diff line number Diff line
@@ -2487,18 +2487,6 @@ unsigned int __read_mostly sysctl_sched_prefer_sync_wakee_to_waker;
unsigned int __read_mostly sched_spill_load;
unsigned int __read_mostly sysctl_sched_spill_load_pct = 100;

/*
 * Tasks with demand >= sched_heavy_task will have their
 * window-based demand added to the previous window's CPU
 * time when they wake up, if they have slept for at least
 * one full window. This feature is disabled when the tunable
 * is set to 0 (the default).
 */
#ifdef CONFIG_SCHED_FREQ_INPUT
unsigned int __read_mostly sysctl_sched_heavy_task_pct;
unsigned int __read_mostly sched_heavy_task;
#endif

/*
 * Tasks whose bandwidth consumption on a cpu is more than
 * sched_upmigrate are considered "big" tasks. Big tasks will be
@@ -2589,8 +2577,6 @@ void set_hmp_defaults(void)
	update_up_down_migrate();

#ifdef CONFIG_SCHED_FREQ_INPUT
	sched_heavy_task =
		pct_to_real(sysctl_sched_heavy_task_pct);
	sched_major_task_runtime =
		mult_frac(sched_ravg_window, MAJOR_TASK_PCT, 100);
#endif
+0 −1
Original line number Diff line number Diff line
@@ -969,7 +969,6 @@ extern unsigned int sched_upmigrate;
extern unsigned int sched_downmigrate;
extern unsigned int sched_init_task_load_pelt;
extern unsigned int sched_init_task_load_windows;
extern unsigned int sched_heavy_task;
extern unsigned int up_down_migrate_scale_factor;
extern unsigned int sysctl_sched_restrict_cluster_spill;
extern unsigned int sched_pred_alert_load;
Loading