
Commit 076ac2af authored by Mike Travis, committed by Ingo Molnar

sched, numa: replace MAX_NUMNODES with nr_node_ids in kernel/sched.c



  * Replace usages of MAX_NUMNODES with nr_node_ids in kernel/sched.c,
    where appropriate.  This saves some allocated space and avoids many
    wasted cycles spent iterating over non-existent node entries.
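
For a rough user-space illustration of why this helps: MAX_NUMNODES is a
compile-time ceiling on node ids, while nr_node_ids reflects the nodes
actually possible on the running system. The sketch below is hypothetical
(the 1024 ceiling and the 4-node count are stand-ins for the kernel
symbols, not kernel code), but it shows how sizing and iterating by the
runtime count shrinks the allocation and skips the empty tail of the node
space:

	#include <stdio.h>
	#include <stdlib.h>

	/* Compile-time ceiling, standing in for the kernel's MAX_NUMNODES;
	 * 1024 is a hypothetical CONFIG_NODES_SHIFT=10 configuration. */
	#define MAX_NUMNODES	1024

	/* Runtime node count, standing in for the kernel's nr_node_ids;
	 * a 4-node machine is assumed here. */
	static int nr_node_ids = 4;

	int main(void)
	{
		/* Before: one slot per possible node up to the ceiling. */
		void **per_node_old = calloc(MAX_NUMNODES, sizeof(void *));
		/* After: one slot per node that can actually exist. */
		void **per_node_new = calloc(nr_node_ids, sizeof(void *));

		/* Wrapping "start at @node" walk, as in find_next_best_node();
		 * the modulus keeps the walk inside real node ids. */
		int node = 2, i;
		for (i = 0; i < nr_node_ids; i++)
			printf("visit node %d\n", (node + i) % nr_node_ids);

		printf("per-node array: %zu bytes -> %zu bytes\n",
		       MAX_NUMNODES * sizeof(void *),
		       (size_t)nr_node_ids * sizeof(void *));

		free(per_node_old);
		free(per_node_new);
		return 0;
	}

On such a machine the loops touch 4 entries instead of 1024, which is the
cycle saving the commit message describes.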

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 886533a3
+9 −9
@@ -6538,9 +6538,9 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
 
 	min_val = INT_MAX;
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Start at @node */
-		n = (node + i) % MAX_NUMNODES;
+		n = (node + i) % nr_node_ids;
 
 		if (!nr_cpus_node(n))
 			continue;
@@ -6734,7 +6734,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 		if (!sched_group_nodes)
 			continue;
 
-		for (i = 0; i < MAX_NUMNODES; i++) {
+		for (i = 0; i < nr_node_ids; i++) {
 			struct sched_group *oldsg, *sg = sched_group_nodes[i];
 
 			*nodemask = node_to_cpumask(i);
@@ -6927,7 +6927,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
-	sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
+	sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
 				    GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -7066,7 +7066,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Set up physical groups */
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7090,7 +7090,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 					send_covered, tmpmask);
 	}
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Set up node groups */
 		struct sched_group *sg, *prev;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
@@ -7129,9 +7129,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		cpus_or(*covered, *covered, *nodemask);
 		prev = sg;
 
-		for (j = 0; j < MAX_NUMNODES; j++) {
+		for (j = 0; j < nr_node_ids; j++) {
 			SCHED_CPUMASK_VAR(notcovered, allmasks);
-			int n = (i + j) % MAX_NUMNODES;
+			int n = (i + j) % nr_node_ids;
 			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(*notcovered, *covered);
@@ -7184,7 +7184,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	}
 
 #ifdef CONFIG_NUMA
-	for (i = 0; i < MAX_NUMNODES; i++)
+	for (i = 0; i < nr_node_ids; i++)
 		init_numa_sched_groups_power(sched_group_nodes[i]);
 
 	if (sd_allnodes) {