Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c69fc56d authored by Rusty Russell
Browse files

cpumask: use topology_core_cpumask/topology_thread_cpumask instead of cpu_core_map/cpu_sibling_map



Impact: cleanup

This is presumably what those definitions are for, and while all archs
define cpu_core_map/cpu_sibling_map, that's changing (eg. x86 wants to
change it to a pointer).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
parent d95c3578
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -102,7 +102,7 @@ static inline int blk_cpu_to_group(int cpu)
	const struct cpumask *mask = cpu_coregroup_mask(cpu);
	return cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	return first_cpu(per_cpu(cpu_sibling_map, cpu));
	return cpumask_first(topology_thread_cpumask(cpu));
#else
	return cpu;
#endif
+4 −4
Original line number Diff line number Diff line
@@ -7249,7 +7249,7 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
{
	int group;

	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
	group = cpumask_first(mask);
	if (sg)
		*sg = &per_cpu(sched_group_core, group).sg;
@@ -7278,7 +7278,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
	group = cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
	group = cpumask_first(mask);
#else
	group = cpu;
@@ -7621,7 +7621,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
		SD_INIT(sd, SIBLING);
		set_domain_attribute(sd, attr);
		cpumask_and(sched_domain_span(sd),
			    &per_cpu(cpu_sibling_map, i), cpu_map);
			    topology_thread_cpumask(i), cpu_map);
		sd->parent = p;
		p->child = sd;
		cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
@@ -7632,7 +7632,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
	/* Set up CPU (sibling) groups */
	for_each_cpu(i, cpu_map) {
		cpumask_and(this_sibling_map,
			    &per_cpu(cpu_sibling_map, i), cpu_map);
			    topology_thread_cpumask(i), cpu_map);
		if (i != cpumask_first(this_sibling_map))
			continue;