include/linux/sched.h  +1 −1

@@ -164,7 +164,7 @@
 extern unsigned long nr_iowait_cpu(int cpu);
 extern unsigned long this_cpu_load(void);

 extern void sched_update_nr_prod(int cpu, long delta, bool inc);
-extern void sched_get_nr_running_avg(int *avg, int *iowait_avg);
+extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg);

 extern void calc_global_load(unsigned long ticks);
 extern void update_cpu_load_nohz(void);
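Since the exported helper gains a third out-parameter, every caller has to be updated in lockstep with this prototype change. A minimal sketch of a poller using the new signature (the function and variable names below are hypothetical; only the sched_get_nr_running_avg() prototype comes from this patch):

	#include <linux/sched.h>
	#include <linux/printk.h>

	static void poll_run_queue_stats(void)
	{
		int avg, iowait_avg, big_avg;

		/* Each value is the time-weighted average since the last poll, scaled by 100. */
		sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);

		pr_debug("rq avg=%d.%02d big=%d.%02d iowait=%d.%02d\n",
			 avg / 100, avg % 100,
			 big_avg / 100, big_avg % 100,
			 iowait_avg / 100, iowait_avg % 100);
	}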
include/trace/events/sched.h  +22 −0

@@ -902,6 +902,28 @@ TRACE_EVENT(sched_pi_setprio,
 		__entry->oldprio, __entry->newprio)
 );

+TRACE_EVENT(sched_get_nr_running_avg,
+
+	TP_PROTO(int avg, int big_avg, int iowait_avg),
+
+	TP_ARGS(avg, big_avg, iowait_avg),
+
+	TP_STRUCT__entry(
+		__field( int, avg )
+		__field( int, big_avg )
+		__field( int, iowait_avg )
+	),
+
+	TP_fast_assign(
+		__entry->avg = avg;
+		__entry->big_avg = big_avg;
+		__entry->iowait_avg = iowait_avg;
+	),
+
+	TP_printk("avg=%d big_avg=%d iowait_avg=%d",
+		__entry->avg, __entry->big_avg, __entry->iowait_avg)
+);
+
 #endif /* _TRACE_SCHED_H */

 /* This part must be outside protection */
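Once the event is enabled, each poll of sched_get_nr_running_avg() emits one record in the TP_printk format above; an illustrative (not captured) trace line would look roughly like:

	<...>-1234  [000] ....   981.276310: sched_get_nr_running_avg: avg=212 big_avg=75 iowait_avg=3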
kernel/sched/fair.c  +24 −0

@@ -2248,6 +2248,30 @@
 static inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { }
 #endif /* CONFIG_CFS_BANDWIDTH */

+/*
+ * Return total number of tasks "eligible" to run on highest capacity cpu
+ *
+ * This is simply nr_big_tasks for cpus which are not of max_capacity and
+ * (nr_running - nr_small_tasks) for cpus of max_capacity
+ */
+unsigned int nr_eligible_big_tasks(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	int nr_big = rq->hmp_stats.nr_big_tasks;
+	int nr = rq->nr_running;
+	int nr_small = rq->hmp_stats.nr_small_tasks;
+
+	if (rq->capacity != max_capacity)
+		return nr_big;
+
+	/* Consider all (except small) tasks on max_capacity cpu as big tasks */
+	nr_big = nr - nr_small;
+	if (nr_big < 0)
+		nr_big = 0;
+
+	return nr_big;
+}
+
 /*
  * reset_cpu_hmp_stats - reset HMP stats for a cpu
  * nr_big_tasks, nr_small_tasks
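A quick worked example of the two branches: a CPU that is not of max_capacity with hmp_stats.nr_big_tasks = 2 reports 2 regardless of its nr_running, while a max_capacity CPU with nr_running = 5 and nr_small_tasks = 1 reports 5 - 1 = 4, because every non-small task already placed on the biggest CPUs is counted as eligible. The clamp to 0 guards against nr_small_tasks transiently exceeding nr_running.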
kernel/sched/sched.h  +6 −0

@@ -749,6 +749,7 @@
 unsigned int max_task_load(void);
 extern void sched_account_irqtime(int cpu, struct task_struct *curr,
				 u64 delta, u64 wallclock);
 unsigned int cpu_temp(int cpu);
+extern unsigned int nr_eligible_big_tasks(int cpu);

 static inline int capacity(struct rq *rq)
 {

@@ -822,6 +823,11 @@
 static inline int sched_cpu_high_irqload(int cpu) { return 0; }

 struct hmp_sched_stats;

+static inline unsigned int nr_eligible_big_tasks(int cpu)
+{
+	return 0;
+}
+
 static inline int pct_task_load(struct task_struct *p) { return 0; }

 static inline int capacity(struct rq *rq)
kernel/sched/sched_avg.c  +25 −7

@@ -18,32 +18,38 @@
 #include <linux/hrtimer.h>
 #include <linux/sched.h>
 #include <linux/math64.h>
+#include <trace/events/sched.h>
+
+#include "sched.h"

 static DEFINE_PER_CPU(u64, nr_prod_sum);
 static DEFINE_PER_CPU(u64, last_time);
+static DEFINE_PER_CPU(u64, nr_big_prod_sum);
 static DEFINE_PER_CPU(u64, nr);
 static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
 static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
 static s64 last_get_time;

 /**
  * sched_get_nr_running_avg
- * @return: Average nr_running and iowait value since last poll.
+ * @return: Average nr_running, iowait and nr_big_tasks value since last poll.
  *	    Returns the avg * 100 to return up to two decimal points
  *	    of accuracy.
  *
  * Obtains the average nr_running value since the last poll.
  * This function may not be called concurrently with itself
  */
-void sched_get_nr_running_avg(int *avg, int *iowait_avg)
+void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
 {
 	int cpu;
 	u64 curr_time = sched_clock();
 	u64 diff = curr_time - last_get_time;
-	u64 tmp_avg = 0, tmp_iowait = 0;
+	u64 tmp_avg = 0, tmp_iowait = 0, tmp_big_avg = 0;

 	*avg = 0;
 	*iowait_avg = 0;
+	*big_avg = 0;

 	if (!diff)
 		return;

@@ -57,12 +63,21 @@
 		tmp_avg += per_cpu(nr_prod_sum, cpu);
 		tmp_avg += per_cpu(nr, cpu) *
 			(curr_time - per_cpu(last_time, cpu));
+
+		tmp_big_avg += per_cpu(nr_big_prod_sum, cpu);
+		tmp_big_avg += nr_eligible_big_tasks(cpu) *
+			(curr_time - per_cpu(last_time, cpu));
+
 		tmp_iowait += per_cpu(iowait_prod_sum, cpu);
 		tmp_iowait += nr_iowait_cpu(cpu) *
 			(curr_time - per_cpu(last_time, cpu));
+
 		per_cpu(last_time, cpu) = curr_time;
+
 		per_cpu(nr_prod_sum, cpu) = 0;
+		per_cpu(nr_big_prod_sum, cpu) = 0;
 		per_cpu(iowait_prod_sum, cpu) = 0;
+
 		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
 	}

@@ -70,12 +85,14 @@
 	last_get_time = curr_time;

 	*avg = (int)div64_u64(tmp_avg * 100, diff);
+	*big_avg = (int)div64_u64(tmp_big_avg * 100, diff);
 	*iowait_avg = (int)div64_u64(tmp_iowait * 100, diff);

-	BUG_ON(*avg < 0);
-	pr_debug("%s - avg:%d\n", __func__, *avg);
-	BUG_ON(*iowait_avg < 0);
-	pr_debug("%s - iowait_avg:%d\n", __func__, *iowait_avg);
+	trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg);
+
+	BUG_ON(*avg < 0 || *big_avg < 0 || *iowait_avg < 0);
+	pr_debug("%s - avg:%d big_avg:%d iowait_avg:%d\n",
+		__func__, *avg, *big_avg, *iowait_avg);
 }
 EXPORT_SYMBOL(sched_get_nr_running_avg);

@@ -104,6 +121,7 @@
 	BUG_ON((s64)per_cpu(nr, cpu) < 0);

 	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
+	per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff;
 	per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
 	spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
 }
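The accumulation scheme is the same for all three statistics: whenever nr_running changes, sched_update_nr_prod() multiplies the value that was in effect by how long it was in effect and adds that to a per-cpu product sum; sched_get_nr_running_avg() then divides the summed products by the length of the polling window and scales by 100. A standalone userspace sketch of that arithmetic for a single CPU (all names and sample numbers here are illustrative, none are kernel symbols):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* (timestamp in ns, nr_running after the change) over a 100 ms window */
		struct { uint64_t t; unsigned int nr; } samples[] = {
			{ 0, 2 }, { 40000000, 3 }, { 70000000, 1 },
		};
		uint64_t window_end = 100000000, prod_sum = 0, last = 0;
		unsigned int nr = 0;

		for (int i = 0; i < 3; i++) {
			/* mirrors: per_cpu(nr_prod_sum, cpu) += nr_running * diff */
			prod_sum += (uint64_t)nr * (samples[i].t - last);
			nr = samples[i].nr;
			last = samples[i].t;
		}
		prod_sum += (uint64_t)nr * (window_end - last);

		/* mirrors: *avg = div64_u64(tmp_avg * 100, diff) */
		printf("avg*100 = %llu\n",
		       (unsigned long long)(prod_sum * 100 / window_end));
		return 0;
	}

With these samples the window holds 2 tasks for 40 ms, 3 for 30 ms and 1 for 30 ms, so the program prints avg*100 = 200, i.e. an average of 2.00 runnable tasks, matching the "avg * 100" convention documented in the kernel-doc comment.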