
Commit 0601a88d authored by Andreas Herrmann, committed by Ingo Molnar

sched: Separate out build of NUMA sched groups from __build_sched_domains

... to further strip down __build_sched_domains().

Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818110111.GL29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent de616e36
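
The net effect: the long per-node group-setup loop in __build_sched_domains() shrinks to a call into the new helper. A minimal sketch of the resulting call site, reconstructed from the added lines in the diff below:

	for (i = 0; i < nr_node_ids; i++)
		if (build_numa_sched_groups(&d, cpu_map, i))
			goto error;

build_numa_sched_groups() returns -ENOMEM when a sched_group allocation fails, so the caller folds any non-zero return into the function's existing error path, where the old inline loop jumped to the error label directly.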
+67 −63
@@ -8246,6 +8246,71 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
		sg = sg->next;
	} while (sg != group_head);
}

+static int build_numa_sched_groups(struct s_data *d,
+				   const struct cpumask *cpu_map, int num)
+{
+	struct sched_domain *sd;
+	struct sched_group *sg, *prev;
+	int n, j;
+
+	cpumask_clear(d->covered);
+	cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
+	if (cpumask_empty(d->nodemask)) {
+		d->sched_group_nodes[num] = NULL;
+		goto out;
+	}
+
+	sched_domain_node_span(num, d->domainspan);
+	cpumask_and(d->domainspan, d->domainspan, cpu_map);
+
+	sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+			  GFP_KERNEL, num);
+	if (!sg) {
+		printk(KERN_WARNING "Can not alloc domain group for node %d\n",
+		       num);
+		return -ENOMEM;
+	}
+	d->sched_group_nodes[num] = sg;
+
+	for_each_cpu(j, d->nodemask) {
+		sd = &per_cpu(node_domains, j).sd;
+		sd->groups = sg;
+	}
+
+	sg->__cpu_power = 0;
+	cpumask_copy(sched_group_cpus(sg), d->nodemask);
+	sg->next = sg;
+	cpumask_or(d->covered, d->covered, d->nodemask);
+
+	prev = sg;
+	for (j = 0; j < nr_node_ids; j++) {
+		n = (num + j) % nr_node_ids;
+		cpumask_complement(d->notcovered, d->covered);
+		cpumask_and(d->tmpmask, d->notcovered, cpu_map);
+		cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
+		if (cpumask_empty(d->tmpmask))
+			break;
+		cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
+		if (cpumask_empty(d->tmpmask))
+			continue;
+		sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+				  GFP_KERNEL, num);
+		if (!sg) {
+			printk(KERN_WARNING
+			       "Can not alloc domain group for node %d\n", j);
+			return -ENOMEM;
+		}
+		sg->__cpu_power = 0;
+		cpumask_copy(sched_group_cpus(sg), d->tmpmask);
+		sg->next = prev->next;
+		cpumask_or(d->covered, d->covered, d->tmpmask);
+		prev->next = sg;
+		prev = sg;
+	}
+out:
+	return 0;
+}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_NUMA
@@ -8652,70 +8717,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
	if (d.sd_allnodes)
		build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);

-	for (i = 0; i < nr_node_ids; i++) {
-		/* Set up node groups */
-		struct sched_group *sg, *prev;
-		int j;
-
-		cpumask_clear(d.covered);
-		cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
-		if (cpumask_empty(d.nodemask)) {
-			d.sched_group_nodes[i] = NULL;
-			continue;
-		}
-
-		sched_domain_node_span(i, d.domainspan);
-		cpumask_and(d.domainspan, d.domainspan, cpu_map);
-
-		sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
-				  GFP_KERNEL, i);
-		if (!sg) {
-			printk(KERN_WARNING "Can not alloc domain group for "
-				"node %d\n", i);
-			goto error;
-		}
-		d.sched_group_nodes[i] = sg;
-		for_each_cpu(j, d.nodemask) {
-			struct sched_domain *sd;
-
-			sd = &per_cpu(node_domains, j).sd;
-			sd->groups = sg;
-		}
-		sg->__cpu_power = 0;
-		cpumask_copy(sched_group_cpus(sg), d.nodemask);
-		sg->next = sg;
-		cpumask_or(d.covered, d.covered, d.nodemask);
-		prev = sg;
-
-		for (j = 0; j < nr_node_ids; j++) {
-			int n = (i + j) % nr_node_ids;
-
-			cpumask_complement(d.notcovered, d.covered);
-			cpumask_and(d.tmpmask, d.notcovered, cpu_map);
-			cpumask_and(d.tmpmask, d.tmpmask, d.domainspan);
-			if (cpumask_empty(d.tmpmask))
-				break;
-
-			cpumask_and(d.tmpmask, d.tmpmask, cpumask_of_node(n));
-			if (cpumask_empty(d.tmpmask))
-				continue;
-
-			sg = kmalloc_node(sizeof(struct sched_group) +
-					  cpumask_size(),
-					  GFP_KERNEL, i);
-			if (!sg) {
-				printk(KERN_WARNING
-				"Can not alloc domain group for node %d\n", j);
-				goto error;
-			}
-			sg->__cpu_power = 0;
-			cpumask_copy(sched_group_cpus(sg), d.tmpmask);
-			sg->next = prev->next;
-			cpumask_or(d.covered, d.covered, d.tmpmask);
-			prev->next = sg;
-			prev = sg;
-		}
-	}
+	for (i = 0; i < nr_node_ids; i++)
+		if (build_numa_sched_groups(&d, cpu_map, i))
+			goto error;
#endif

	/* Calculate CPU power for physical packages and nodes */