
Commit 602a54a8 authored by Mike Travis <travis@sgi.com>, committed by Ingo Molnar

x86: change bios_cpu_apicid to percpu data variable fixup



Change the static bios_cpu_apicid array to a per_cpu data variable.
This includes using a static array during initialization, similar to
the way x86_cpu_to_apicid[] is handled.

There is one early use of bios_cpu_apicid, in apic_is_clustered_box().
The other reference, in cpu_present_to_apicid(), is called after
smp_set_apicids() has set up the percpu version of bios_cpu_apicid.
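
The conversion pattern described above can be illustrated with a small,
self-contained user-space sketch (this is not the kernel code; the names
apicid_init, early_apicid_ptr, setup_maps and cpu_apicid are invented for
the example): early lookups go through a pointer to a static boot-time
table, the table is later copied into per-CPU storage, and the pointer is
cleared so subsequent lookups use the per-CPU copy.

#include <stdio.h>

#define NR_CPUS 4

/* Illustrative sketch of the early-pointer/per-CPU-copy pattern; not kernel code. */

/* static table filled during early "boot", before per-CPU areas exist */
static unsigned int apicid_init[NR_CPUS] = { 0, 1, 4, 5 };
static unsigned int *early_apicid_ptr = apicid_init;

/* stand-in for the per-CPU variable the data is copied into */
static unsigned int percpu_apicid[NR_CPUS];

/* copy the early table into per-CPU storage, then retire the table */
static void setup_maps(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		percpu_apicid[cpu] = apicid_init[cpu];
	early_apicid_ptr = NULL;	/* the static array is now expendable */
}

/* lookup that works both before and after setup_maps() has run */
static unsigned int cpu_apicid(int cpu)
{
	if (early_apicid_ptr)
		return early_apicid_ptr[cpu];
	return percpu_apicid[cpu];
}

int main(void)
{
	printf("early lookup:  cpu 2 -> apicid %u\n", cpu_apicid(2));
	setup_maps();
	printf("percpu lookup: cpu 2 -> apicid %u\n", cpu_apicid(2));
	return 0;
}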

Signed-off-by: Mike Travis <travis@sgi.com>
Reviewed-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 0164fe16
+3 −3
@@ -1205,9 +1205,9 @@ __cpuinit int apic_is_clustered_box(void)


 	/* Problem:  Partially populated chassis may not have CPUs in some of
 	 * the APIC clusters they have been allocated.  Only present CPUs have
-	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.  Since
-	 * clusters are allocated sequentially, count zeros only if they are
-	 * bounded by ones.
+	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
+	 * Since clusters are allocated sequentially, count zeros only if
+	 * they are bounded by ones.
 	 */
 	clusters = 0;
 	zeros = 0;
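
The comment reflowed above describes the counting rule apic_is_clustered_box()
applies to the bitmap built from x86_bios_cpu_apicid: zero bits (clusters with
no present CPUs) are only added to the cluster count once a later one-bit shows
the gap is bounded by populated clusters. A minimal user-space sketch of that
rule (illustrative only, not the kernel function; count_clusters is an invented
name):

#include <stdio.h>

/*
 * Illustrative sketch: count clusters in a presence bitmap, treating
 * interior gaps (zeros bounded by ones) as allocated-but-empty clusters
 * and ignoring trailing zeros.  Not the kernel implementation.
 */
static int count_clusters(const int *bitmap, int n)
{
	int clusters = 0, zeros = 0, i;

	for (i = 0; i < n; i++) {
		if (bitmap[i]) {
			clusters += 1 + zeros;	/* gap was bounded by ones */
			zeros = 0;
		} else {
			zeros++;		/* counted only if a one follows */
		}
	}
	return clusters;
}

int main(void)
{
	int map[] = { 1, 0, 0, 1, 1, 0 };	/* trailing zero is not counted */

	printf("clusters = %d\n", count_clusters(map, 6));	/* prints 5 */
	return 0;
}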
+8 −5
@@ -85,10 +85,10 @@ __setup("noexec32=", nonx32_setup);


 /*
  * Copy data used in early init routines from the initial arrays to the
- * per cpu data areas.  These arrays then become expendable and the *_ptrs
- * are zeroed indicating that the static arrays are gone.
+ * per cpu data areas.  These arrays then become expendable and the
+ * *_early_ptr's are zeroed indicating that the static arrays are gone.
  */
-void __init setup_percpu_maps(void)
+static void __init setup_per_cpu_maps(void)
 {
 	int cpu;

@@ -98,6 +98,8 @@ void __init setup_percpu_maps(void)
 #endif
 			per_cpu(x86_cpu_to_apicid, cpu) =
 						x86_cpu_to_apicid_init[cpu];
+			per_cpu(x86_bios_cpu_apicid, cpu) =
+						x86_bios_cpu_apicid_init[cpu];
 #ifdef CONFIG_NUMA
 			per_cpu(x86_cpu_to_node_map, cpu) =
 						x86_cpu_to_node_map_init[cpu];
@@ -110,8 +112,9 @@ void __init setup_percpu_maps(void)
 #endif
 	}

-	/* indicate the early static arrays are gone */
+	/* indicate the early static arrays will soon be gone */
 	x86_cpu_to_apicid_early_ptr = NULL;
+	x86_bios_cpu_apicid_early_ptr = NULL;
 #ifdef CONFIG_NUMA
 	x86_cpu_to_node_map_early_ptr = NULL;
 #endif
@@ -152,7 +155,7 @@ void __init setup_per_cpu_areas(void)
 	}

 	/* setup percpu data maps early */
-	setup_percpu_maps();
+	setup_per_cpu_maps();
 }

 void pda_init(int cpu)
+1 −0
@@ -334,6 +334,7 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_SMP
 	/* setup to use the early static init tables during kernel startup */
 	x86_cpu_to_apicid_early_ptr = (void *)&x86_cpu_to_apicid_init;
+	x86_bios_cpu_apicid_early_ptr = (void *)&x86_bios_cpu_apicid_init;
 #ifdef CONFIG_NUMA
 	x86_cpu_to_node_map_early_ptr = (void *)&x86_cpu_to_node_map_init;
 #endif
+1 −1
@@ -465,7 +465,7 @@ cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
 				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
 EXPORT_SYMBOL(node_to_cpumask_map);
 /* which node each logical CPU is on */
-u8 cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
+int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
 EXPORT_SYMBOL(cpu_to_node_map);

 /* set up a mapping between cpu and node. */
+1 −1
@@ -397,7 +397,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = {
 	[0 ... MAX_NUMNODES-1] = PXM_INVAL
 };
-static u16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
+static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 };
 static int __init find_node_by_addr(unsigned long addr)