kernel/sched/debug.c +25 −0

@@ -744,6 +744,21 @@ do { \
 	P(cpu_load[2]);
 	P(cpu_load[3]);
 	P(cpu_load[4]);
+#ifdef CONFIG_SMP
+	P(cpu_capacity);
+#endif
+#ifdef CONFIG_SCHED_WALT
+	P(cluster->load_scale_factor);
+	P(cluster->capacity);
+	P(cluster->max_possible_capacity);
+	P(cluster->efficiency);
+	P(cluster->cur_freq);
+	P(cluster->max_freq);
+	P(cluster->exec_scale_factor);
+	P(walt_stats.nr_big_tasks);
+	SEQ_printf(m, "  .%-30s: %llu\n", "walt_stats.cumulative_runnable_avg",
+		   rq->walt_stats.cumulative_runnable_avg);
+#endif
 #undef P
 #undef PN

@@ -822,6 +837,13 @@ static void sched_debug_header(struct seq_file *m)
 	PN(sysctl_sched_wakeup_granularity);
 	P(sysctl_sched_child_runs_first);
 	P(sysctl_sched_features);
+#ifdef CONFIG_SCHED_WALT
+	P(sched_init_task_load_windows);
+	P(min_capacity);
+	P(max_capacity);
+	P(sched_ravg_window);
+	P(sched_load_granule);
+#endif
 #undef PN
 #undef P

@@ -1042,6 +1064,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 	P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
 	P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
+#ifdef CONFIG_SCHED_WALT
+	P(ravg.demand);
+#endif

 	avg_atom = p->se.sum_exec_runtime;
 	if (nr_switches)
 		avg_atom = div64_ul(avg_atom, nr_switches);

kernel/sched/energy.c +2 −2

@@ -173,7 +173,7 @@ static int sched_energy_probe(struct platform_device *pdev)
 	for_each_possible_cpu(cpu) {
 		struct device *cpu_dev;
 		struct dev_pm_opp *opp;
-		int efficiency = cpu > 3 ? 1740 : 1024;
+		int efficiency = topology_get_cpu_scale(NULL, cpu);

 		max_efficiency = max(efficiency, max_efficiency);

@@ -209,7 +209,7 @@ static int sched_energy_probe(struct platform_device *pdev)
 	for_each_possible_cpu(cpu) {
 		unsigned long cpu_max_cap;
 		struct sched_group_energy *sge_l0, *sge;
-		int efficiency = cpu > 3 ? 1740 : 1024;
+		int efficiency = topology_get_cpu_scale(NULL, cpu);

 		cpu_max_cap = DIV_ROUND_UP(SCHED_CAPACITY_SCALE *
 				max_frequencies[cpu], max_freq);

kernel/sched/walt.c +5 −10

@@ -2026,13 +2026,6 @@ void mark_task_starting(struct task_struct *p)
 	update_task_cpu_cycles(p, cpu_of(rq));
 }

-unsigned long __weak arch_get_cpu_efficiency(int cpu)
-{
-	if (cpu > 3)
-		return (SCHED_CAPACITY_SCALE * 17) / 10;
-	return SCHED_CAPACITY_SCALE;
-}
-
 static cpumask_t all_cluster_cpus = CPU_MASK_NONE;
 DECLARE_BITMAP(all_cluster_ids, NR_CPUS);
 struct sched_cluster *sched_cluster[NR_CPUS];

@@ -2084,9 +2077,7 @@ static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
 	raw_spin_lock_init(&cluster->load_lock);
 	cluster->cpus = *cpus;
-	// FIXME:
-	cluster->efficiency = arch_get_cpu_efficiency(cpumask_first(cpus));
-	cluster->efficiency = 1024;
+	cluster->efficiency = topology_get_cpu_scale(NULL, cpumask_first(cpus));
 	if (cluster->efficiency > max_possible_efficiency)
 		max_possible_efficiency = cluster->efficiency;

@@ -3118,4 +3109,8 @@ void walt_sched_init(struct rq *rq)
 	walt_cpu_util_freq_divisor =
 		(sched_ravg_window >> SCHED_CAPACITY_SHIFT) * 100;
+
+	sched_init_task_load_windows =
+		div64_u64((u64)sysctl_sched_init_task_load_pct *
+			  (u64)sched_ravg_window, 100);
 }
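Review note on the debug.c hunks: each new P() call becomes one "  .name: value" row in the per-CPU section of /proc/sched_debug, and the one u64 field gets an explicit SEQ_printf() so it prints with %llu. A user-space sketch of that rendering follows; the macro body is an assumption modeled on print_cpu()'s P(), and all field values are invented:

#include <stdio.h>

struct cluster { long long efficiency, cur_freq, max_freq; };

struct rq_sketch {
	struct cluster *cluster;
	unsigned long long cumulative_runnable_avg;
};

/* Assumed shape of debug.c's P(): stringify the field and print it. */
#define P(rq, x) \
	printf("  .%-30s: %lld\n", #x, (long long)((rq)->x))

int main(void)
{
	struct cluster c = { .efficiency = 1024, .cur_freq = 1800000,
			     .max_freq = 2208000 };
	struct rq_sketch rq = { .cluster = &c, .cumulative_runnable_avg = 73 };

	P(&rq, cluster->efficiency);
	P(&rq, cluster->cur_freq);
	P(&rq, cluster->max_freq);
	/* u64 fields get an explicit %llu print, which is why the patch
	 * open-codes SEQ_printf() for walt_stats.cumulative_runnable_avg. */
	printf("  .%-30s: %llu\n", "walt_stats.cumulative_runnable_avg",
	       rq.cumulative_runnable_avg);
	return 0;
}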
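The common thread in the energy.c and walt.c hunks is replacing hardcoded efficiencies (1024 for CPUs 0–3, 1740 for the rest — the same pair the removed arch_get_cpu_efficiency() fallback returned, since (SCHED_CAPACITY_SCALE * 17) / 10 == 1740) with topology_get_cpu_scale(), which reports the per-CPU scale the arch_topology driver derives from the device tree's capacity-dmips-mhz properties. A minimal sketch of that normalization, assuming hypothetical DT values in the old 1740:1024 ratio:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

int main(void)
{
	/* Hypothetical capacity-dmips-mhz values for a 4+4 big.LITTLE SoC,
	 * chosen to match the old hardcoded 1024/1740 pair. */
	unsigned long raw[8] = { 1024, 1024, 1024, 1024,
				 1740, 1740, 1740, 1740 };
	unsigned long max_raw = 0;
	int cpu;

	for (cpu = 0; cpu < 8; cpu++)
		if (raw[cpu] > max_raw)
			max_raw = raw[cpu];

	/* Same normalization arch_topology applies: the fastest CPU ends
	 * up at SCHED_CAPACITY_SCALE, everything else scales down. */
	for (cpu = 0; cpu < 8; cpu++)
		printf("cpu%d: scale = %lu\n", cpu,
		       raw[cpu] * SCHED_CAPACITY_SCALE / max_raw);
	return 0;
}

The absolute numbers shift under this scheme — the big cores now read 1024 and the LITTLE cores about 602, instead of 1740 and 1024 — but the big/LITTLE ratio that the max_efficiency and max_possible_efficiency comparisons actually consume is preserved.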
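The new block at the end of walt_sched_init() precomputes sched_init_task_load_windows, the initial WALT demand credited to a freshly forked task: sysctl_sched_init_task_load_pct percent of one accounting window. A sketch of the arithmetic, assuming the common WALT defaults (15% and a 20 ms window) rather than values taken from this tree:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed defaults, not values from this patch. */
	uint64_t init_task_load_pct = 15;      /* sysctl_sched_init_task_load_pct */
	uint64_t sched_ravg_window = 20000000; /* 20 ms window, in ns */

	/* div64_u64() in the kernel; ordinary 64-bit division here. */
	uint64_t sched_init_task_load_windows =
		init_task_load_pct * sched_ravg_window / 100;

	/* 15% of a 20 ms window -> 3000000 ns of initial demand. */
	printf("%llu\n", (unsigned long long)sched_init_task_load_windows);
	return 0;
}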