Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f3fa8ebc authored by Rohit Seth, committed by Linus Torvalds
Browse files

[PATCH] x86_64: moving phys_proc_id and cpu_core_id to cpuinfo_x86



Most of the fields of cpuinfo are defined in cpuinfo_x86 structure.
This patch moves the phys_proc_id and cpu_core_id for each processor to
cpuinfo_x86 structure as well.

Signed-off-by: Rohit Seth <rohitseth@google.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent e465058d
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -115,7 +115,7 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
		per_cpu(bank_map, cpu) |= (1 << bank);

#ifdef CONFIG_SMP
		if (shared_bank[bank] && cpu_core_id[cpu])
		if (shared_bank[bank] && c->cpu_core_id)
			continue;
#endif

@@ -323,10 +323,10 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, int bank)
	struct threshold_bank *b = NULL;

#ifdef CONFIG_SMP
	if (cpu_core_id[cpu] && shared_bank[bank]) {	/* symlink */
	if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) {	/* symlink */
		char name[16];
		unsigned lcpu = first_cpu(cpu_core_map[cpu]);
		if (cpu_core_id[lcpu])
		if (cpu_data[lcpu].cpu_core_id)
			goto out;	/* first core not up yet */

		b = per_cpu(threshold_banks, lcpu)[bank];
@@ -434,7 +434,7 @@ static __cpuinit int threshold_create_symlinks(unsigned int cpu)
	int bank, err = 0;
	unsigned int lcpu = 0;

	if (cpu_core_id[cpu])
	if (cpu_data[cpu].cpu_core_id)
		return 0;
	for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
		if (lcpu == cpu)
@@ -455,7 +455,7 @@ static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
{
	int bank;
	unsigned int lcpu = 0;
	if (cpu_core_id[cpu])
	if (cpu_data[cpu].cpu_core_id)
		return;
	for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
		if (lcpu == cpu)
+0 −3
Original line number Diff line number Diff line
@@ -680,9 +680,6 @@ void __init gart_iommu_init(void)
	dma_ops = &gart_dma_ops;
} 

/* Must execute after PCI subsystem */
fs_initcall(gart_iommu_init);

void gart_parse_options(char *p)
{
	int arg;
+13 −14
Original line number Diff line number Diff line
@@ -785,9 +785,9 @@ static int nearby_node(int apicid)
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();
	unsigned bits;
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
#endif
@@ -805,12 +805,12 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
	}

	/* Low order bits define the core id (index of core in socket) */
	cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	phys_proc_id[cpu] = phys_pkg_id(bits);
	c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
  	node = phys_proc_id[cpu];
  	node = c->phys_proc_id;
 	if (apicid_to_node[apicid] != NUMA_NO_NODE)
 		node = apicid_to_node[apicid];
 	if (!node_online(node)) {
@@ -823,7 +823,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 		   but in the same order as the HT nodeids.
 		   If that doesn't result in a usable node fall back to the
 		   path for the previous case.  */
 		int ht_nodeid = apicid - (phys_proc_id[0] << bits);
 		int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
 		if (ht_nodeid >= 0 &&
 		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
 			node = apicid_to_node[ht_nodeid];
@@ -834,7 +834,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
	numa_set_node(cpu, node);

  	printk(KERN_INFO "CPU %d/%x(%d) -> Node %d -> Core %d\n",
  			cpu, apicid, c->x86_max_cores, node, cpu_core_id[cpu]);
  			cpu, apicid, c->x86_max_cores, node, c->cpu_core_id);
#endif
#endif
}
@@ -905,7 +905,6 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
#ifdef CONFIG_SMP
	u32 	eax, ebx, ecx, edx;
	int 	index_msb, core_bits;
	int 	cpu = smp_processor_id();

	cpuid(1, &eax, &ebx, &ecx, &edx);

@@ -926,10 +925,10 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
		}

		index_msb = get_count_order(smp_num_siblings);
		phys_proc_id[cpu] = phys_pkg_id(index_msb);
		c->phys_proc_id = phys_pkg_id(index_msb);

		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
		       phys_proc_id[cpu]);
		       c->phys_proc_id);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

@@ -937,12 +936,12 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)

		core_bits = get_count_order(c->x86_max_cores);

		cpu_core_id[cpu] = phys_pkg_id(index_msb) &
		c->cpu_core_id = phys_pkg_id(index_msb) &
					       ((1 << core_bits) - 1);

		if (c->x86_max_cores > 1)
			printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
			       cpu_core_id[cpu]);
			       c->cpu_core_id);
	}
#endif
}
@@ -1080,7 +1079,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
	}

#ifdef CONFIG_SMP
	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}

@@ -1288,9 +1287,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef CONFIG_SMP
	if (smp_num_siblings * c->x86_max_cores > 1) {
		int cpu = c - cpu_data;
		seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
		seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
#endif	
+5 −9
Original line number Diff line number Diff line
@@ -63,10 +63,6 @@

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
/* Package ID of each logical CPU */
u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
/* core ID of each logical CPU */
u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };

/* Last level cache ID of each logical CPU */
u8 cpu_llc_id[NR_CPUS] __cpuinitdata  = {[0 ... NR_CPUS-1] = BAD_APICID};
@@ -472,8 +468,8 @@ static inline void set_cpu_sibling_map(int cpu)

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (phys_proc_id[cpu] == phys_proc_id[i] &&
			    cpu_core_id[cpu] == cpu_core_id[i]) {
			if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
			    c[cpu].cpu_core_id == c[i].cpu_core_id) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
				cpu_set(i, cpu_core_map[cpu]);
@@ -500,7 +496,7 @@ static inline void set_cpu_sibling_map(int cpu)
			cpu_set(i, c[cpu].llc_shared_map);
			cpu_set(cpu, c[i].llc_shared_map);
		}
		if (phys_proc_id[cpu] == phys_proc_id[i]) {
		if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
			cpu_set(i, cpu_core_map[cpu]);
			cpu_set(cpu, cpu_core_map[i]);
			/*
@@ -1201,8 +1197,8 @@ static void remove_siblinginfo(int cpu)
		cpu_clear(cpu, cpu_sibling_map[sibling]);
	cpus_clear(cpu_sibling_map[cpu]);
	cpus_clear(cpu_core_map[cpu]);
	phys_proc_id[cpu] = BAD_APICID;
	cpu_core_id[cpu] = BAD_APICID;
	c[cpu].phys_proc_id = 0;
	c[cpu].cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

+4 −0
Original line number Diff line number Diff line
@@ -69,7 +69,11 @@ struct cpuinfo_x86 {
	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
#endif
	__u8	apicid;
#ifdef CONFIG_SMP
	__u8	booted_cores;	/* number of cores as seen by OS */
	__u8	phys_proc_id;	/* Physical Processor id. */
	__u8	cpu_core_id;	/* Core id. */
#endif
} ____cacheline_aligned;

#define X86_VENDOR_INTEL 0
Loading