kernel/sched.c  +14 −7

@@ -67,13 +67,6 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 	return (unsigned long long)jiffies * (1000000000 / HZ);
 }
 
-/*
- * CPU frequency is/was unstable - start new by setting prev_clock_raw:
- */
-void sched_clock_unstable_event(void)
-{
-}
-
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
@@ -629,6 +622,20 @@ static inline struct rq *this_rq_lock(void)
 	return rq;
 }
 
+/*
+ * CPU frequency is/was unstable - start new by setting prev_clock_raw:
+ */
+void sched_clock_unstable_event(void)
+{
+	unsigned long flags;
+	struct rq *rq;
+
+	rq = task_rq_lock(current, &flags);
+	rq->prev_clock_raw = sched_clock();
+	rq->clock_unstable_events++;
+	task_rq_unlock(rq, &flags);
+}
+
 /*
  * resched_task - mark a task 'to be rescheduled now'.
  *
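For context, here is a minimal sketch of how a per-runqueue clock might consume prev_clock_raw: the rq clock advances by the raw sched_clock() delta since the last sample, so re-sampling prev_clock_raw at a frequency-change event keeps the warp across the transition from being folded into the accumulated clock. The struct layout and the update_rq_clock() helper below are assumptions for illustration, not code from this patch.

/*
 * Illustrative sketch only -- the field set and the update helper are
 * assumptions, not taken from this patch.
 */
struct rq {
	u64 clock;			/* per-rq accumulated clock */
	u64 prev_clock_raw;		/* last raw sched_clock() sample */
	unsigned int clock_unstable_events;
	/* ... */
};

static void update_rq_clock(struct rq *rq)
{
	u64 now = sched_clock();
	u64 delta = now - rq->prev_clock_raw;	/* raw time since last sample */

	rq->prev_clock_raw = now;
	rq->clock += delta;			/* advance by the raw delta */
}

Under that scheme, having sched_clock_unstable_event() reset prev_clock_raw to the current sched_clock() value while holding the current task's runqueue lock discards whatever raw time elapsed across the frequency change, and clock_unstable_events simply counts how often that happened. This is also why the function had to move below this_rq_lock(): its new body depends on task_rq_lock()/task_rq_unlock(), which are defined later in the file than its original location.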