Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f1ec666a authored by Chris Redpath
Browse files

ANDROID: sched: Unconditionally honor sync flag for energy-aware wakeups



Since we don't do energy-aware wakeups when we are overutilized, always
honoring sync wakeups in this state does not prevent wake-wide mechanics
overruling the flag as normal.

This patch is based upon previous work to build EAS for android products.

sync-hint code taken from commit 4a5e890ec60d
"sched/fair: add tunable to force selection at cpu granularity" written
by Juri Lelli <juri.lelli@arm.com>

Change-Id: I4b3d79141fc8e53dc51cd63ac11096c2e3cb10f5
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
parent 58afaac0
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };

extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_sync_hint_enable;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;

+15 −2
Original line number Diff line number Diff line
@@ -54,6 +54,11 @@
unsigned int sysctl_sched_latency			= 6000000ULL;
unsigned int normalized_sysctl_sched_latency		= 6000000ULL;

/*
 * Enable/disable honoring the sync flag in energy-aware wakeups.
 *
 * When non-zero (the default), a sync wakeup is placed on the waking
 * CPU, provided that CPU is present in the task's cpus_allowed mask.
 * Exposed at runtime as /proc/sys/kernel/sched_sync_hint_enable.
 */
unsigned int sysctl_sched_sync_hint_enable = 1;

/*
 * The initial- and re-scaling of tunables is configurable
 *
@@ -6343,13 +6348,21 @@ static bool cpu_overutilized(int cpu)
	return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
}

static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu)
static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
{
	int i;
	int min_diff = 0, energy_cpu = prev_cpu, spare_cpu = prev_cpu;
	unsigned long max_spare = 0;
	struct sched_domain *sd;

	if (sysctl_sched_sync_hint_enable && sync) {
		int cpu = smp_processor_id();

		if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
			return cpu;
		}
	}

	sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));

	if (!sd)
@@ -6427,7 +6440,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
	rcu_read_lock();
	sd = rcu_dereference(cpu_rq(prev_cpu)->sd);
	if (energy_aware() && sd && !sd_overutilized(sd)) {
		new_cpu = select_energy_cpu_brute(p, prev_cpu);
		new_cpu = select_energy_cpu_brute(p, prev_cpu, sync);
		goto unlock;
	}

+7 −0
Original line number Diff line number Diff line
@@ -329,6 +329,13 @@ static struct ctl_table kern_table[] = {
		.extra1		= &min_sched_granularity_ns,
		.extra2		= &max_sched_granularity_ns,
	},
	{
		.procname	= "sched_sync_hint_enable",
		.data		= &sysctl_sched_sync_hint_enable,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "sched_wakeup_granularity_ns",
		.data		= &sysctl_sched_wakeup_granularity,