Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5da5828a authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: Add sysctl to enable power aware scheduling"

parents eaf1c2ed 7e13b27b
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1086,7 +1086,7 @@ high-performance CPU).

Appears at: /proc/sys/kernel/sched_enable_power_aware

Default value: 1
Default value: 0

Controls whether or not per-CPU power values are used in determining
task placement. If this is disabled, tasks are simply placed on the
+1 −0
Original line number Diff line number Diff line
@@ -45,6 +45,7 @@ extern unsigned int sysctl_sched_freq_account_wait_time;
extern unsigned int sysctl_sched_migration_fixup;
extern unsigned int sysctl_sched_heavy_task_pct;
extern unsigned int sysctl_sched_min_runtime;
extern unsigned int sysctl_sched_enable_power_aware;

#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
extern unsigned int sysctl_sched_init_task_load_pct;
+1 −1
Original line number Diff line number Diff line
@@ -1078,7 +1078,7 @@ static int __init set_sched_enable_power_aware(char *str)

	get_option(&str, &enable_power_aware);

	sched_enable_power_aware = !!enable_power_aware;
	sysctl_sched_enable_power_aware = !!enable_power_aware;

	return 0;
}
+9 −8
Original line number Diff line number Diff line
@@ -1265,8 +1265,9 @@ unsigned int __read_mostly sysctl_sched_spill_nr_run = 10;
/*
 * Control whether or not individual CPU power consumption is used to
 * guide task placement.
 * This sysctl can be set to a default value using boot command line arguments.
 */
unsigned int __read_mostly sched_enable_power_aware = 0;
unsigned int __read_mostly sysctl_sched_enable_power_aware = 0;

/*
 * This specifies the maximum percent power difference between 2
@@ -1714,7 +1715,7 @@ unsigned int power_cost_at_freq(int cpu, unsigned int freq)
	struct cpu_pstate_pwr *costs;

	if (!per_cpu_info || !per_cpu_info[cpu].ptable ||
	    !sched_enable_power_aware)
	    !sysctl_sched_enable_power_aware)
		/* When power aware scheduling is not in use, or CPU
		 * power data is not available, just use the CPU
		 * capacity as a rough stand-in for real CPU power
@@ -1745,7 +1746,7 @@ static unsigned int power_cost(struct task_struct *p, int cpu)
	unsigned int task_freq;
	unsigned int cur_freq = cpu_rq(cpu)->cur_freq;

	if (!sched_enable_power_aware)
	if (!sysctl_sched_enable_power_aware)
		return cpu_rq(cpu)->max_possible_capacity;

	/* calculate % of max freq needed */
@@ -2375,7 +2376,7 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
	if (!task_will_fit(p, cpu_of(rq)))
		return MOVE_TO_BIG_CPU;

	if (sched_enable_power_aware &&
	if (sysctl_sched_enable_power_aware &&
	    !is_task_migration_throttled(p) &&
	    is_cpu_throttling_imminent(cpu_of(rq)) &&
	    lower_power_cpu_available(p, cpu_of(rq)))
@@ -2470,7 +2471,7 @@ unsigned int cpu_temp(int cpu)

#else	/* CONFIG_SCHED_HMP */

#define sched_enable_power_aware 0
#define sysctl_sched_enable_power_aware 0

static inline int task_will_fit(struct task_struct *p, int cpu)
{
@@ -6166,7 +6167,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
	 * prioritize spreading work over power optimization.
	 */
	cpu = cpumask_first(sched_group_cpus(sg));
	if (sched_enable_power_aware &&
	if (sysctl_sched_enable_power_aware &&
	    !sds->busiest && (capacity(env->dst_rq) == group_rq_capacity(sg)) &&
	    sgs->sum_nr_running && (env->idle != CPU_NOT_IDLE) &&
	    power_cost_at_freq(env->dst_cpu, 0) <
@@ -6963,7 +6964,7 @@ void idle_balance(int this_cpu, struct rq *this_rq)
	 * most power-efficient idle CPU. */
	rcu_read_lock();
	sd = rcu_dereference_check_sched_domain(this_rq->sd);
	if (sd && sched_enable_power_aware) {
	if (sd && sysctl_sched_enable_power_aware) {
		for_each_cpu(i, sched_domain_span(sd)) {
			if (i == this_cpu || idle_cpu(i)) {
				cost = power_cost_at_freq(i, 0);
@@ -7372,7 +7373,7 @@ static int select_lowest_power_cpu(struct cpumask *cpus)
	int lowest_power_cpu = -1;
	int lowest_power = INT_MAX;

	if (sched_enable_power_aware) {
	if (sysctl_sched_enable_power_aware) {
		for_each_cpu(i, cpus) {
			cost = power_cost_at_freq(i, 0);
			if (cost < lowest_power) {
+0 −1
Original line number Diff line number Diff line
@@ -886,7 +886,6 @@ static inline void clear_reserved(int cpu)
}

extern unsigned int sched_enable_hmp;
extern unsigned int sched_enable_power_aware;

int mostly_idle_cpu(int cpu);
extern void check_for_migration(struct rq *rq, struct task_struct *p);
Loading