
Commit 3d8cb903 authored by Morten Rasmussen, committed by Andres Oportus

UPSTREAM: sched/fair: Add per-CPU min capacity to sched_group_capacity



struct sched_group_capacity currently represents the compute capacity
sum of all CPUs in the sched_group.

Unless it is divided by the group_weight to get the average capacity
per CPU, it hides differences in CPU capacity for mixed capacity systems
(e.g. high RT/IRQ utilization or ARM big.LITTLE).

But even the average may not be sufficient if the group covers CPUs of
different capacities. For example, a group with one CPU of capacity 1024
and one of capacity 512 averages 768, a value that neither matches the
big CPU nor reveals that the little CPU offers only 512.

Instead, by extending struct sched_group_capacity to indicate the minimum
per-CPU capacity in the group, a suitable group for a given task
utilization can be found more easily, so that CPUs with reduced capacity
can be avoided for tasks with high utilization (not implemented by this
patch).
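
The consumer of min_capacity is left for later patches; the stand-alone
sketch below (hypothetical names and simplified structures, not kernel
code) only illustrates the intent: skip any group whose weakest CPU
cannot accommodate the task's utilization.

	/*
	 * Illustrative sketch only -- the use of min_capacity is explicitly
	 * "not implemented by this patch". The structures and helper here are
	 * simplified stand-ins, not the kernel's.
	 */
	#include <stdio.h>

	struct group {
		const char *name;
		unsigned long min_capacity;	/* lowest per-CPU capacity in the group */
	};

	/* Return the first group whose weakest CPU can still hold task_util. */
	static const struct group *pick_group(const struct group *groups, int nr,
					      unsigned long task_util)
	{
		for (int i = 0; i < nr; i++) {
			if (groups[i].min_capacity >= task_util)
				return &groups[i];
		}
		return NULL;	/* no group fits the task on its weakest CPU */
	}

	int main(void)
	{
		/* e.g. big.LITTLE: LITTLE cluster min 512, big cluster min 1024 */
		const struct group groups[] = {
			{ "little",  512 },
			{ "big",    1024 },
		};
		unsigned long task_util = 700;	/* heavy task */
		const struct group *g;

		g = pick_group(groups, (int)(sizeof(groups) / sizeof(groups[0])),
			       task_util);
		printf("task util %lu -> %s\n", task_util, g ? g->name : "no fit");
		return 0;
	}

With capacities of 512 and 1024 and a task utilization of 700, the little
group is rejected because its weakest CPU is too small, something neither
the group's capacity sum nor its average would reveal on its own.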

Change-Id: I833725e2b70794384c2b8efac5dc8107f1dbb622
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dietmar.eggemann@arm.com
Cc: freedom.tan@mediatek.com
Cc: keita.kobayashi.ym@renesas.com
Cc: mgalbraith@suse.de
Cc: sgurrappadi@nvidia.com
Cc: vincent.guittot@linaro.org
Cc: yuyang.du@intel.com
Link: http://lkml.kernel.org/r/1476452472-24740-4-git-send-email-morten.rasmussen@arm.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit bf475ce0a3dd75b5d1df6c6c14ae25168caa15ac)
[Fixed cherry-pick issue]
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
parent 54444350
+1 −0
@@ -6386,6 +6386,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		 */
 		sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
 		sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
+		sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
 
 		/*
 		 * Make sure the first group of this domain contains the
+6 −1
@@ -7688,13 +7688,14 @@ skip_unlock: __attribute__ ((unused));
 	cpu_rq(cpu)->cpu_capacity = capacity;
 	sdg->sgc->capacity = capacity;
 	sdg->sgc->max_capacity = capacity;
+	sdg->sgc->min_capacity = capacity;
 }
 
 void update_group_capacity(struct sched_domain *sd, int cpu)
 {
 	struct sched_domain *child = sd->child;
 	struct sched_group *group, *sdg = sd->groups;
-	unsigned long capacity, max_capacity;
+	unsigned long capacity, max_capacity, min_capacity;
 	unsigned long interval;
 
 	interval = msecs_to_jiffies(sd->balance_interval);
@@ -7708,6 +7709,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 
 	capacity = 0;
 	max_capacity = 0;
+	min_capacity = ULONG_MAX;
 
 	if (child->flags & SD_OVERLAP) {
 		/*
@@ -7738,6 +7740,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 			}
 
 			max_capacity = max(capacity, max_capacity);
+			min_capacity = min(capacity, min_capacity);
 		}
 	} else  {
 		/*
@@ -7751,12 +7754,14 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 
 			capacity += sgc->capacity;
 			max_capacity = max(sgc->max_capacity, max_capacity);
+			min_capacity = min(sgc->min_capacity, min_capacity);
 			group = group->next;
 		} while (group != child->groups);
 	}
 
 	sdg->sgc->capacity = capacity;
 	sdg->sgc->max_capacity = max_capacity;
+	sdg->sgc->min_capacity = min_capacity;
 }
 
 /*
+1 −0
@@ -933,6 +933,7 @@ struct sched_group_capacity {
 	 */
 	unsigned long capacity;
 	unsigned long max_capacity; /* Max per-cpu capacity in group */
+	unsigned long min_capacity; /* Min per-CPU capacity in group */
 	unsigned long next_update;
 	int imbalance; /* XXX unrelated to capacity but shared group state */