
Commit 155dd720 authored by Rusty Russell

cpumask: convert struct cpuinfo_x86's llc_shared_map to cpumask_var_t



Impact: reduce kernel memory usage when CONFIG_CPUMASK_OFFSTACK=y

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
parent c032ef60
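
For readers unfamiliar with cpumask_var_t: it behaves like a cpumask whose backing storage is heap-allocated only when CONFIG_CPUMASK_OFFSTACK=y; otherwise it is an ordinary embedded bitmap and the alloc/free calls compile away. A minimal sketch of that pattern follows (illustrative only, not part of this patch; struct foo and its functions are hypothetical):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct foo {
	cpumask_var_t mask;		/* a pointer when CONFIG_CPUMASK_OFFSTACK=y */
};

static int foo_init(struct foo *f, int cpu)
{
	/* Allocates backing storage only in the OFFSTACK configuration. */
	if (!alloc_cpumask_var(&f->mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_clear(f->mask);
	cpumask_set_cpu(cpu, f->mask);	/* note: no '&' on the field any more */
	return 0;
}

static void foo_exit(struct foo *f)
{
	free_cpumask_var(f->mask);	/* no-op unless OFFSTACK */
}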
+1 −1
@@ -94,7 +94,7 @@ struct cpuinfo_x86 {
 	unsigned long		loops_per_jiffy;
 #ifdef CONFIG_SMP
 	/* cpus sharing the last level cache: */
-	cpumask_t		llc_shared_map;
+	cpumask_var_t		llc_shared_map;
 #endif
 	/* cpuid returned max cores value: */
 	u16			 x86_max_cores;
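
The type change above is the whole point of the patch: cpumask_var_t is defined in include/linux/cpumask.h roughly as follows (paraphrased from the kernel headers of that era), so with CONFIG_CPUMASK_OFFSTACK=y the struct holds a single pointer instead of a full NR_CPUS-bit bitmap:

#ifdef CONFIG_CPUMASK_OFFSTACK
/* A bare pointer; storage comes from alloc_cpumask_var(). */
typedef struct cpumask *cpumask_var_t;
#else
/* An array of one, so the variable still decays to a struct cpumask pointer. */
typedef struct cpumask cpumask_var_t[1];
#endif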
+26 −7
@@ -329,6 +329,23 @@ notrace static void __cpuinit start_secondary(void *unused)
 	cpu_idle();
 }
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* In this case, llc_shared_map is a pointer to a cpumask. */
+static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
+				    const struct cpuinfo_x86 *src)
+{
+	struct cpumask *llc = dst->llc_shared_map;
+	*dst = *src;
+	dst->llc_shared_map = llc;
+}
+#else
+static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
+				    const struct cpuinfo_x86 *src)
+{
+	*dst = *src;
+}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
+
 /*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
@@ -338,7 +355,7 @@ void __cpuinit smp_store_cpu_info(int id)
 {
 	struct cpuinfo_x86 *c = &cpu_data(id);
 
-	*c = boot_cpu_data;
+	copy_cpuinfo_x86(c, &boot_cpu_data);
 	c->cpu_index = id;
 	if (id != 0)
 		identify_secondary_cpu(c);
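
The copy_cpuinfo_x86() helper is needed here because, in the OFFSTACK case, llc_shared_map is now a pointer: a plain structure assignment would overwrite the destination CPU's pointer with the boot CPU's, making both entries alias one mask and leaking the destination's own allocation. Roughly, the hazard it avoids looks like this (illustrative, not from the patch):

	/* With CONFIG_CPUMASK_OFFSTACK=y and a plain struct copy: */
	*c = boot_cpu_data;
	/* c->llc_shared_map now points at boot_cpu_data's mask, so ... */
	cpumask_set_cpu(id, c->llc_shared_map);	/* ... scribbles on the boot CPU's mask,
						   and c's original allocation is leaked */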
@@ -362,15 +379,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 				cpumask_set_cpu(cpu, cpu_sibling_mask(i));
 				cpumask_set_cpu(i, cpu_core_mask(cpu));
 				cpumask_set_cpu(cpu, cpu_core_mask(i));
-				cpumask_set_cpu(i, &c->llc_shared_map);
-				cpumask_set_cpu(cpu, &o->llc_shared_map);
+				cpumask_set_cpu(i, c->llc_shared_map);
+				cpumask_set_cpu(cpu, o->llc_shared_map);
 			}
 		}
 	} else {
 		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
 	}
 
-	cpumask_set_cpu(cpu, &c->llc_shared_map);
+	cpumask_set_cpu(cpu, c->llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
 		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
@@ -381,8 +398,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 	for_each_cpu(i, cpu_sibling_setup_mask) {
 		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-			cpumask_set_cpu(i, &c->llc_shared_map);
-			cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map);
+			cpumask_set_cpu(i, c->llc_shared_map);
+			cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
 		}
 		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
 			cpumask_set_cpu(i, cpu_core_mask(cpu));
@@ -420,7 +437,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
 	if (sched_mc_power_savings || sched_smt_power_savings)
 		return cpu_core_mask(cpu);
 	else
-		return &c->llc_shared_map;
+		return c->llc_shared_map;
 }
 
 static void impress_friends(void)
@@ -1039,8 +1056,10 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	for_each_possible_cpu(i) {
 		alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
 		alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
 		cpumask_clear(per_cpu(cpu_core_map, i));
 		cpumask_clear(per_cpu(cpu_sibling_map, i));
+		cpumask_clear(cpu_data(i).llc_shared_map);
 	}
 	set_cpu_sibling_map(0);
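
After this patch, any code touching llc_shared_map has to follow the two rules shown above: drop the '&' when handing the field to the cpumask API, and only use it after native_smp_prepare_cpus() has allocated it. A hypothetical out-of-tree accessor (not in the kernel, shown only to illustrate the new calling convention) would look like:

	/* Hypothetical helper: does @cpu share a last-level cache with @other? */
	static bool shares_llc(int cpu, int other)
	{
		/* Before this patch the field needed '&'; now it is passed directly. */
		return cpumask_test_cpu(other, cpu_data(cpu).llc_shared_map);
	}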