Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 305a7211 authored by Morten Rasmussen, committed by Gerrit - the friendly Code Review server
Browse files

ANDROID: sched: Highest energy aware balancing sched_domain level pointer



Add another member to the family of per-cpu sched_domain shortcut
pointers. This one, sd_ea, points to the highest level at which energy
model is provided. At this level and all levels below it, all sched_groups
have energy model data attached.

Partial energy model information is possible but restricted to providing
energy model data for lower level sched_domains (sd_ea and below) and
leaving load-balancing on levels above to non-energy-aware
load-balancing. For example, it is possible to apply energy-aware
scheduling within each socket on a multi-socket system and let normal
scheduling handle load-balancing between sockets.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Change-Id: Ie9ff4dc97b4fda3292ce58c22f1032cd3085529c
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Git-commit: 9a778300
Git-repo: https://android.googlesource.com/kernel/common/


Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent 329220d2
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -1067,6 +1067,7 @@ DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);
DECLARE_PER_CPU(struct sched_domain *, sd_ea);


struct sched_group_capacity {
struct sched_group_capacity {
	atomic_t ref;
	atomic_t ref;
+10 −0
Original line number Original line Diff line number Diff line
@@ -399,11 +399,13 @@ DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain *, sd_numa);
DEFINE_PER_CPU(struct sched_domain *, sd_numa);
DEFINE_PER_CPU(struct sched_domain *, sd_asym);
DEFINE_PER_CPU(struct sched_domain *, sd_asym);
DEFINE_PER_CPU(struct sched_domain *, sd_ea);


static void update_top_cache_domain(int cpu)
static void update_top_cache_domain(int cpu)
{
{
	struct sched_domain_shared *sds = NULL;
	struct sched_domain_shared *sds = NULL;
	struct sched_domain *sd;
	struct sched_domain *sd;
	struct sched_domain *ea_sd = NULL;
	int id = cpu;
	int id = cpu;
	int size = 1;
	int size = 1;


@@ -424,6 +426,14 @@ static void update_top_cache_domain(int cpu)


	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);

	for_each_domain(cpu, sd) {
		if (sd->groups->sge)
			ea_sd = sd;
		else
			break;
	}
	rcu_assign_pointer(per_cpu(sd_ea, cpu), ea_sd);
}
}


/*
/*