/*
 * NOTE(review): this span is a scraped GitHub diff view (first of two
 * duplicated copies in this file), not compilable C source.  "Loading",
 * "+N −N" counters, "Original line number Diff line number Diff line" and
 * the "@@ ... @@" hunk headers are page chrome fused into the text, and
 * several hunks are truncated mid-function, so the lines below are kept
 * byte-identical; only these standalone comments are added.
 *
 * Patch summary, grounded in the hunks visible below:
 *  - arch/arm64/include/asm/topology.h: adds a `cpumask_t
 *    core_possible_sibling` field to struct cpu_topology (between
 *    core_sibling and llc_sibling) and declares
 *    `const struct cpumask *cpu_possible_coregroup_mask(int cpu);`
 *    next to the existing cpu_coregroup_mask() declaration.
 *  - arch/arm64/kernel/topology.c: new accessor
 *    cpu_possible_coregroup_mask() returns
 *    &cpu_topology[cpu].core_possible_sibling.  New helper
 *    update_possible_siblings_masks(cpuid) returns early when
 *    cpuid_topo->package_id == -1; otherwise it walks every possible CPU
 *    and, for each one whose package_id matches cpuid's, sets cpuid and
 *    cpu in each other's core_possible_sibling mask.  init_cpu_topology()
 *    gains an `int cpu` local and — in the branch where neither ACPI nor
 *    DT topology parsing forced reset_cpu_topology() — calls
 *    update_possible_siblings_masks() for each possible CPU (that loop
 *    body straddles the line break below).
 */
Loading arch/arm64/include/asm/topology.h +2 −0 Original line number Diff line number Diff line Loading @@ -11,6 +11,7 @@ struct cpu_topology { int llc_id; cpumask_t thread_sibling; cpumask_t core_sibling; cpumask_t core_possible_sibling; cpumask_t llc_sibling; }; Loading @@ -26,6 +27,7 @@ void init_cpu_topology(void); void store_cpu_topology(unsigned int cpuid); void remove_cpu_topology(unsigned int cpuid); const struct cpumask *cpu_coregroup_mask(int cpu); const struct cpumask *cpu_possible_coregroup_mask(int cpu); #ifdef CONFIG_NUMA Loading arch/arm64/kernel/topology.c +29 −0 Original line number Diff line number Diff line Loading @@ -213,6 +213,11 @@ static int __init parse_dt_topology(void) struct cpu_topology cpu_topology[NR_CPUS]; EXPORT_SYMBOL_GPL(cpu_topology); const struct cpumask *cpu_possible_coregroup_mask(int cpu) { return &cpu_topology[cpu].core_possible_sibling; } const struct cpumask *cpu_coregroup_mask(int cpu) { const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu)); Loading @@ -230,6 +235,24 @@ const struct cpumask *cpu_coregroup_mask(int cpu) return core_mask; } static void update_possible_siblings_masks(unsigned int cpuid) { struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; int cpu; if (cpuid_topo->package_id == -1) return; for_each_possible_cpu(cpu) { cpu_topo = &cpu_topology[cpu]; if (cpuid_topo->package_id != cpu_topo->package_id) continue; cpumask_set_cpu(cpuid, &cpu_topo->core_possible_sibling); cpumask_set_cpu(cpu, &cpuid_topo->core_possible_sibling); } } static void update_siblings_masks(unsigned int cpuid) { struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; Loading Loading @@ -394,6 +417,8 @@ static inline int __init parse_acpi_topology(void) void __init init_cpu_topology(void) { int cpu; reset_cpu_topology(); /* Loading @@ -404,4 +429,8 @@ void __init init_cpu_topology(void) reset_cpu_topology(); else if (of_have_populated_dt() && parse_dt_topology()) reset_cpu_topology(); else { 
for_each_possible_cpu(cpu) update_possible_siblings_masks(cpu); } } kernel/sched/walt.c +1 −1 Original line number Diff line number Diff line Loading @@ -2286,7 +2286,7 @@ void update_cluster_topology(void) INIT_LIST_HEAD(&new_head); for_each_cpu(i, &cpus) { cluster_cpus = cpu_coregroup_mask(i); cluster_cpus = cpu_possible_coregroup_mask(i); cpumask_or(&all_cluster_cpus, &all_cluster_cpus, cluster_cpus); cpumask_andnot(&cpus, &cpus, cluster_cpus); add_cluster(cluster_cpus, &new_head); Loading Loading
/*
 * kernel/sched/walt.c hunk (directly above): update_cluster_topology()
 * now derives cluster membership from cpu_possible_coregroup_mask(i)
 * instead of cpu_coregroup_mask(i).  Presumably this makes WALT clusters
 * cover all possible package siblings rather than the NUMA-node-derived
 * coregroup, e.g. to keep clusters stable across hotplug —
 * TODO(review): confirm intent against the patch changelog; the
 * motivation is not visible in this scrape.
 */
/*
 * NOTE(review): duplicated scrape of the arch/arm64/include/asm/topology.h
 * diff (same content appears earlier in this file).  Two additions:
 * a `cpumask_t core_possible_sibling` member in struct cpu_topology and
 * the prototype `const struct cpumask *cpu_possible_coregroup_mask(int)`.
 * The struct is shown only from its `llc_id` member down (hunk starts at
 * old line 11), so members above that point are not visible here.
 * Diff residue — kept byte-identical.
 */
arch/arm64/include/asm/topology.h +2 −0 Original line number Diff line number Diff line Loading @@ -11,6 +11,7 @@ struct cpu_topology { int llc_id; cpumask_t thread_sibling; cpumask_t core_sibling; cpumask_t core_possible_sibling; cpumask_t llc_sibling; }; Loading @@ -26,6 +27,7 @@ void init_cpu_topology(void); void store_cpu_topology(unsigned int cpuid); void remove_cpu_topology(unsigned int cpuid); const struct cpumask *cpu_coregroup_mask(int cpu); const struct cpumask *cpu_possible_coregroup_mask(int cpu); #ifdef CONFIG_NUMA Loading
/*
 * NOTE(review): duplicated scrape of the arch/arm64/kernel/topology.c diff
 * (same content appears earlier in this file).  What the visible hunks add:
 *
 *  - cpu_possible_coregroup_mask(cpu): returns a pointer to
 *    cpu_topology[cpu].core_possible_sibling.  No bounds check on `cpu`,
 *    matching the existing cpu_coregroup_mask() style; callers are
 *    expected to pass a valid CPU id.
 *
 *  - update_possible_siblings_masks(cpuid): early-returns when
 *    cpuid_topo->package_id == -1 (topology not populated for this CPU);
 *    otherwise iterates all *possible* CPUs — not just online ones — and
 *    for each CPU sharing cpuid's package_id, symmetrically sets cpuid and
 *    cpu in each other's core_possible_sibling mask.  This mirrors the
 *    structure of update_siblings_masks() visible right after it (which is
 *    truncated in this scrape).
 *
 *  - init_cpu_topology(): gains an `int cpu` local; in the final branch
 *    (neither ACPI nor DT parsing caused reset_cpu_topology()) it runs
 *    for_each_possible_cpu(cpu) update_possible_siblings_masks(cpu), so
 *    the possible-sibling masks are built once at boot from parsed
 *    topology.  The function's mid-body comment and the ACPI condition are
 *    cut out of this scrape ("Loading" markers), so the lines below are
 *    kept byte-identical rather than reconstructed.
 */
arch/arm64/kernel/topology.c +29 −0 Original line number Diff line number Diff line Loading @@ -213,6 +213,11 @@ static int __init parse_dt_topology(void) struct cpu_topology cpu_topology[NR_CPUS]; EXPORT_SYMBOL_GPL(cpu_topology); const struct cpumask *cpu_possible_coregroup_mask(int cpu) { return &cpu_topology[cpu].core_possible_sibling; } const struct cpumask *cpu_coregroup_mask(int cpu) { const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu)); Loading @@ -230,6 +235,24 @@ const struct cpumask *cpu_coregroup_mask(int cpu) return core_mask; } static void update_possible_siblings_masks(unsigned int cpuid) { struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; int cpu; if (cpuid_topo->package_id == -1) return; for_each_possible_cpu(cpu) { cpu_topo = &cpu_topology[cpu]; if (cpuid_topo->package_id != cpu_topo->package_id) continue; cpumask_set_cpu(cpuid, &cpu_topo->core_possible_sibling); cpumask_set_cpu(cpu, &cpuid_topo->core_possible_sibling); } } static void update_siblings_masks(unsigned int cpuid) { struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; Loading Loading @@ -394,6 +417,8 @@ static inline int __init parse_acpi_topology(void) void __init init_cpu_topology(void) { int cpu; reset_cpu_topology(); /* Loading @@ -404,4 +429,8 @@ void __init init_cpu_topology(void) reset_cpu_topology(); else if (of_have_populated_dt() && parse_dt_topology()) reset_cpu_topology(); else { for_each_possible_cpu(cpu) update_possible_siblings_masks(cpu); } }
/*
 * NOTE(review): duplicated scrape of the kernel/sched/walt.c diff (same
 * content appears earlier in this file).  One-line change inside
 * update_cluster_topology(): the cluster-building loop now takes
 * cpu_possible_coregroup_mask(i) instead of cpu_coregroup_mask(i) before
 * OR-ing into all_cluster_cpus, removing the mask's CPUs from the work
 * set, and calling add_cluster().  The rest of the function is not
 * visible here; diff residue kept byte-identical.
 */
kernel/sched/walt.c +1 −1 Original line number Diff line number Diff line Loading @@ -2286,7 +2286,7 @@ void update_cluster_topology(void) INIT_LIST_HEAD(&new_head); for_each_cpu(i, &cpus) { cluster_cpus = cpu_coregroup_mask(i); cluster_cpus = cpu_possible_coregroup_mask(i); cpumask_or(&all_cluster_cpus, &all_cluster_cpus, cluster_cpus); cpumask_andnot(&cpus, &cpus, cluster_cpus); add_cluster(cluster_cpus, &new_head); Loading