Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2929d0b0 authored by Morten Rasmussen's avatar Morten Rasmussen Committed by Satya Durga Srinivasu Prabhala
Browse files

FROMLIST: sched: Add static_key for asymmetric cpu capacity optimizations



The existing asymmetric cpu capacity code should cause minimal overhead
for others. Putting it behind a static_key, as has been done for SMT
optimizations, would make it easier to extend and improve without
causing harm to others moving forward.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>

Change-Id: Iced93ffb71bb2c34eee783c585cffe7252137308
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
[Taken from https://lore.kernel.org/lkml/1530699470-29808-2-git-send-email-morten.rasmussen@arm.com]
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Git-commit: 83a9ce05
Git-repo: https://android.googlesource.com/kernel/common/


Signed-off-by: Puja Gupta <pujag@codeaurora.org>
parent d18a2e31
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -7523,6 +7523,9 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
{
	long min_cap, max_cap;

	if (!static_branch_unlikely(&sched_asym_cpucapacity))
		return 0;

	min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
	max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;

+1 −0
Original line number Diff line number Diff line
@@ -1190,6 +1190,7 @@ DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);
DECLARE_PER_CPU(struct sched_domain *, sd_ea);
DECLARE_PER_CPU(struct sched_domain *, sd_scs);
extern struct static_key_false sched_asym_cpucapacity;

struct sched_group_capacity {
	atomic_t ref;
+19 −0
Original line number Diff line number Diff line
@@ -418,6 +418,7 @@ DEFINE_PER_CPU(struct sched_domain *, sd_numa);
DEFINE_PER_CPU(struct sched_domain *, sd_asym);
DEFINE_PER_CPU(struct sched_domain *, sd_ea);
DEFINE_PER_CPU(struct sched_domain *, sd_scs);
DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);

static void update_top_cache_domain(int cpu)
{
@@ -457,6 +458,21 @@ static void update_top_cache_domain(int cpu)
	rcu_assign_pointer(per_cpu(sd_scs, cpu), sd);
}

/*
 * Enable the sched_asym_cpucapacity static key if @cpu belongs to a
 * sched domain hierarchy that has SD_ASYM_CPUCAPACITY set. The key is
 * only ever switched on, never back off.
 */
static void update_asym_cpucapacity(int cpu)
{
	int asym = false;

	/* The sched domain walk must run under the RCU read lock. */
	rcu_read_lock();
	asym = !!lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY);
	rcu_read_unlock();

	/*
	 * The _cpuslocked variant expects the hotplug lock to be held
	 * by the caller (see the comment on attaching base domains).
	 */
	if (asym)
		static_branch_enable_cpuslocked(&sched_asym_cpucapacity);
}

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
@@ -1868,6 +1884,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
	}
	rcu_read_unlock();

	if (!cpumask_empty(cpu_map))
		update_asym_cpucapacity(cpumask_first(cpu_map));

	if (rq && sched_debug_enabled) {
		pr_info("span: %*pbl (max cpu_capacity = %lu)\n",
			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);