Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d274cb30 authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

sched: Simplify ->cpu_power initialization



The code in update_group_power() does what init_sched_groups_power()
does and more, so remove the special init_ code and call the generic
code instead.

Also move the sd->span_weight initialization because
update_group_power() needs it.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122941.875856012@chello.nl


Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c4a8849a
Loading
Loading
Loading
Loading
+5 −39
Original line number Diff line number Diff line
@@ -6679,9 +6679,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	for (tmp = sd; tmp; tmp = tmp->parent)
		tmp->span_weight = cpumask_weight(sched_domain_span(tmp));

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
@@ -7159,11 +7156,6 @@ static void free_sched_groups(const struct cpumask *cpu_map,
 */
static void init_sched_groups_power(int cpu, struct sched_domain *sd)
{
	struct sched_domain *child;
	struct sched_group *group;
	long power;
	int weight;

	WARN_ON(!sd || !sd->groups);

	if (cpu != group_first_cpu(sd->groups))
@@ -7171,36 +7163,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)

	sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));

	child = sd->child;

	sd->groups->cpu_power = 0;

	if (!child) {
		power = SCHED_LOAD_SCALE;
		weight = cpumask_weight(sched_domain_span(sd));
		/*
		 * SMT siblings share the power of a single core.
		 * Usually multiple threads get a better yield out of
		 * that one core than a single thread would have,
		 * reflect that in sd->smt_gain.
		 */
		if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
			power *= sd->smt_gain;
			power /= weight;
			power >>= SCHED_LOAD_SHIFT;
		}
		sd->groups->cpu_power += power;
		return;
	}

	/*
	 * Add cpu_power of each child group to this groups cpu_power.
	 */
	group = child->groups;
	do {
		sd->groups->cpu_power += group->cpu_power;
		group = group->next;
	} while (group != child->groups);
	update_group_power(sd, cpu);
}

/*
@@ -7507,7 +7470,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
{
	enum s_alloc alloc_state = sa_none;
	struct s_data d;
	struct sched_domain *sd;
	struct sched_domain *sd, *tmp;
	int i;
#ifdef CONFIG_NUMA
	d.sd_allnodes = 0;
@@ -7530,6 +7493,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
		sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i);
		sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
		sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);

		for (tmp = sd; tmp; tmp = tmp->parent)
			tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
	}

	for_each_cpu(i, cpu_map) {