kernel/sched/core.c (+21 −18)

@@ -1660,24 +1660,6 @@ static inline void migrate_sync_cpu(int cpu)
 	sync_cpu = smp_processor_id();
 }
 
-unsigned long sched_get_busy(int cpu)
-{
-	unsigned long flags;
-	struct rq *rq = cpu_rq(cpu);
-
-	/*
-	 * This function could be called in timer context, and the
-	 * current task may have been executing for a long time. Ensure
-	 * that the window stats are current by doing an update.
-	 */
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-
-	return div64_u64(scale_load_to_cpu(rq->prev_runnable_sum, cpu),
-			 NSEC_PER_USEC);
-}
-
 static void reset_all_task_stats(void)
 {
 	struct task_struct *g, *p;
@@ -1804,6 +1786,26 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
 	local_irq_restore(flags);
 }
 
+#ifdef CONFIG_SCHED_FREQ_INPUT
+
+unsigned long sched_get_busy(int cpu)
+{
+	unsigned long flags;
+	struct rq *rq = cpu_rq(cpu);
+
+	/*
+	 * This function could be called in timer context, and the
+	 * current task may have been executing for a long time. Ensure
+	 * that the window stats are current by doing an update.
+	 */
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+	return div64_u64(scale_load_to_cpu(rq->prev_runnable_sum, cpu),
+			 NSEC_PER_USEC);
+}
+
 void sched_set_io_is_busy(int val)
 {
 	sched_io_is_busy = val;
@@ -1836,6 +1838,7 @@ int sched_set_window(u64 window_start, unsigned int window_size)
 
 	return 0;
 }
+#endif	/* CONFIG_SCHED_FREQ_INPUT */
 
 /* Keep track of max/min capacity possible across CPUs "currently" */
 static void update_min_max_capacity(void)
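Since this change moves sched_get_busy() next to the other window-stat entry points and compiles it, along with sched_set_io_is_busy() and sched_set_window(), only under CONFIG_SCHED_FREQ_INPUT, callers built with the option disabled (e.g. a cpufreq governor) would need fallback definitions. The header side of the patch is not shown in these hunks, so the following is only a minimal sketch of what such stubs could look like; the placement and the stub return values are assumptions, not taken from the patch:

/*
 * Sketch only: possible header fallbacks when CONFIG_SCHED_FREQ_INPUT
 * is disabled. The actual patch's header changes are not visible in
 * this diff; return values and location are assumed for illustration.
 */
#ifdef CONFIG_SCHED_FREQ_INPUT
extern unsigned long sched_get_busy(int cpu);
extern void sched_set_io_is_busy(int val);
extern int sched_set_window(u64 window_start, unsigned int window_size);
#else
static inline unsigned long sched_get_busy(int cpu)
{
	return 0;	/* report no tracked load when the feature is off */
}

static inline void sched_set_io_is_busy(int val) { }

static inline int sched_set_window(u64 window_start,
				   unsigned int window_size)
{
	return -EINVAL;	/* window tracking unavailable */
}
#endif /* CONFIG_SCHED_FREQ_INPUT */

Static-inline no-op stubs keep the call sites free of #ifdef clutter, which is the usual kernel convention for config-gated scheduler hooks.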