Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 502f4d4f authored by Linus Torvalds
Browse files
* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Fix and clean up generic_processor_info()
  x86: Don't copy per_cpu cpuinfo for BSP two times
  x86: Move llc_shared_map out of cpu_info
parents da849abe e5fea868
Loading
Loading
Loading
Loading
+0 −4
Original line number Diff line number Diff line
@@ -94,10 +94,6 @@ struct cpuinfo_x86 {
	int			x86_cache_alignment;	/* In bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
#ifdef CONFIG_SMP
	/* cpus sharing the last level cache: */
	cpumask_var_t		llc_shared_map;
#endif
	/* cpuid returned max cores value: */
	u16			 x86_max_cores;
	u16			apicid;
+7 −0
Original line number Diff line number Diff line
@@ -33,6 +33,8 @@ static inline bool cpu_has_ht_siblings(void)

DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
/* cpus sharing the last level cache: */
DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
DECLARE_PER_CPU(u16, cpu_llc_id);
DECLARE_PER_CPU(int, cpu_number);

@@ -46,6 +48,11 @@ static inline struct cpumask *cpu_core_mask(int cpu)
	return per_cpu(cpu_core_map, cpu);
}

/*
 * Return the mask of CPUs that share the last-level cache with @cpu.
 * Accessor for the per-CPU cpu_llc_shared_map declared above; callers
 * (e.g. intel_cacheinfo / mce_amd code in this commit) use it instead
 * of the former cpuinfo_x86::llc_shared_map field.
 */
static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	return per_cpu(cpu_llc_shared_map, cpu);
}

DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);

+20 −19
Original line number Diff line number Diff line
@@ -1930,17 +1930,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
{
	int cpu;

	/*
	 * Validate version
	 */
	if (version == 0x0) {
		pr_warning("BIOS bug, APIC version is 0 for CPU#%d! "
			   "fixing up to 0x10. (tell your hw vendor)\n",
				version);
		version = 0x10;
	}
	apic_version[apicid] = version;

	if (num_processors >= nr_cpu_ids) {
		int max = nr_cpu_ids;
		int thiscpu = max + disabled_cpus;
@@ -1954,22 +1943,34 @@ void __cpuinit generic_processor_info(int apicid, int version)
	}

	num_processors++;
	cpu = cpumask_next_zero(-1, cpu_present_mask);

	if (version != apic_version[boot_cpu_physical_apicid])
		WARN_ONCE(1,
			"ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n",
			apic_version[boot_cpu_physical_apicid], cpu, version);

	physid_set(apicid, phys_cpu_present_map);
	if (apicid == boot_cpu_physical_apicid) {
		/*
		 * x86_bios_cpu_apicid is required to have processors listed
		 * in same order as logical cpu numbers. Hence the first
		 * entry is BSP, and so on.
		 * boot_cpu_init() already hold bit 0 in cpu_present_mask
		 * for BSP.
		 */
		cpu = 0;
	} else
		cpu = cpumask_next_zero(-1, cpu_present_mask);

	/*
	 * Validate version
	 */
	if (version == 0x0) {
		pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
			   cpu, apicid);
		version = 0x10;
	}
	apic_version[apicid] = version;

	if (version != apic_version[boot_cpu_physical_apicid]) {
		pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
			apic_version[boot_cpu_physical_apicid], cpu, version);
	}

	physid_set(apicid, phys_cpu_present_map);
	if (apicid > max_physical_apicid)
		max_physical_apicid = apicid;

+2 −2
Original line number Diff line number Diff line
@@ -732,11 +732,11 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
		for_each_cpu(i, c->llc_shared_map) {
		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, c->llc_shared_map) {
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
+2 −5
Original line number Diff line number Diff line
@@ -527,15 +527,12 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
	int i, err = 0;
	struct threshold_bank *b = NULL;
	char name[32];
#ifdef CONFIG_SMP
	struct cpuinfo_x86 *c = &cpu_data(cpu);
#endif

	sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
		i = cpumask_first(c->llc_shared_map);
		i = cpumask_first(cpu_llc_shared_mask(cpu));

		/* first core not up yet */
		if (cpu_data(i).cpu_core_id)
@@ -555,7 +552,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
		if (err)
			goto out;

		cpumask_copy(b->cpus, c->llc_shared_map);
		cpumask_copy(b->cpus, cpu_llc_shared_mask(cpu));
		per_cpu(threshold_banks, cpu)[bank] = b;

		goto out;
Loading