Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3ddfb50a authored by Srinivas Ramana, committed by Jeevan Shriram
Browse files

arm: topology: fix the topology reset path during init



init_cpu_topology has two bugs:
1. It refers to an out-of-bounds array element.
2. It does not actually reset the topology for each cpu.

Fix this by iterating over each cpu when resetting the topology.

Change-Id: I4df03fb4fbe9cb636f135de982e35173b1900ec2
Signed-off-by: Srinivas Ramana <sramana@codeaurora.org>
Signed-off-by: Jeevan Shriram <jshriram@codeaurora.org>
parent ab6b841b
Loading
Loading
Loading
Loading
+31 −19
Original line number Diff line number Diff line
@@ -240,6 +240,9 @@ static int __init parse_dt_topology(void)
	unsigned long capacity = 0;
	int cpu = 0, ret = 0;

	__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
				 GFP_NOWAIT);

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
@@ -266,9 +269,6 @@ static int __init parse_dt_topology(void)
		if (cpu_topology[cpu].socket_id == -1)
			ret = -EINVAL;

	__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
				 GFP_NOWAIT);

	for_each_possible_cpu(cpu) {
		const u32 *rate;
		int len;
@@ -397,7 +397,8 @@ static void update_siblings_masks(unsigned int cpuid)
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
	smp_wmb();

	smp_wmb(); /* Ensure mask is updated*/
}

/*
@@ -589,38 +590,49 @@ static struct sched_domain_topology_level arm_topology[] = {
	{ NULL, },
};

/*
 * init_cpu_topology is called at boot when only one cpu is running
 * which prevent simultaneous write access to cpu_topology array
 */
void __init init_cpu_topology(void)
/*
 * reset_cpu_topology - put every possible CPU's topology record into the
 * "unknown" state: thread/core/socket ids set to -1 and both sibling
 * cpumasks cleared, followed by a write barrier so the cleared state is
 * visible before any subsequent topology updates.
 *
 * NOTE(review): this span is a diff rendered without +/- markers; the two
 * adjacent cpu_topo declarations below are the old and new sides of the
 * same changed line, not duplicate code — confirm against the upstream
 * commit before treating this text as a compilable function.
 */
static void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	/* init core mask and capacity */
	for_each_possible_cpu(cpu) {
		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
		struct cputopo_arm *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id =  -1;
		cpu_topo->socket_id = -1;

		/* forget any previously recorded sibling relationships */
		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
	}
	smp_wmb();
}

	if (parse_dt_topology()) {
		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);

		cpu_topo->thread_id = -1;
		cpu_topo->core_id =  -1;
		cpu_topo->socket_id = -1;
		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
/*
 * reset_cpu_capacity - restore every possible CPU's capacity to the
 * default SCHED_CAPACITY_SCALE value via set_capacity_scale().
 * Used at boot (and on DT-parse failure, per the caller below) to undo
 * any partially applied per-cpu capacity values.
 */
static void __init reset_cpu_capacity(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		set_capacity_scale(cpu, SCHED_CAPACITY_SCALE);
}

/*
 * init_cpu_topology is called at boot when only one cpu is running
 * which prevent simultaneous write access to cpu_topology array
 */
void __init init_cpu_topology(void)
{
	unsigned int cpu;

	/* init core mask and capacity */
	reset_cpu_topology();
	reset_cpu_capacity();
	smp_wmb(); /* Ensure CPU topology and capacity are up to date */

	if (parse_dt_topology()) {
		reset_cpu_topology();
		reset_cpu_capacity();
	}

	for_each_possible_cpu(cpu)
		update_siblings_masks(cpu);