kernel/sched.c  +0 −47

@@ -2412,53 +2412,6 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_CONTEXT_SWITCH_TRACER
-void ftrace_task(struct task_struct *p, void *__tr, void *__data)
-{
-#if 0
-	/*
-	 * trace timeline tree
-	 */
-	__trace_special(__tr, __data,
-			p->pid, p->se.vruntime, p->se.sum_exec_runtime);
-#else
-	/*
-	 * trace balance metrics
-	 */
-	__trace_special(__tr, __data,
-			p->pid, p->se.avg_overlap, 0);
-#endif
-}
-
-void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
-{
-	struct task_struct *p;
-	struct sched_entity *se;
-	struct rb_node *curr;
-	struct rq *rq = __rq;
-
-	if (rq->cfs.curr) {
-		p = task_of(rq->cfs.curr);
-		ftrace_task(p, __tr, __data);
-	}
-	if (rq->cfs.next) {
-		p = task_of(rq->cfs.next);
-		ftrace_task(p, __tr, __data);
-	}
-
-	for (curr = first_fair(&rq->cfs); curr; curr = rb_next(curr)) {
-		se = rb_entry(curr, struct sched_entity, run_node);
-		if (!entity_is_task(se))
-			continue;
-		p = task_of(se);
-		ftrace_task(p, __tr, __data);
-	}
-}
-#endif
-
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
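For reference, the removed ftrace_all_fair_tasks() simply walked every entity on the CFS runqueue in rbtree order and emitted one trace record per task. The following is a minimal user-space sketch of that walk, not kernel code: struct toy_task, the sorted array standing in for the rbtree, and emit_trace() are invented for illustration; the kernel used rq->cfs, first_fair()/rb_next() and __trace_special() instead.

/* toy analogue of "one trace record per runnable task, in vruntime order" */
#include <stdio.h>

struct toy_task {
	int pid;
	unsigned long long vruntime;     /* key the CFS rbtree is ordered by */
	unsigned long long avg_overlap;
};

/* stand-in for __trace_special(): emit one record per task */
static void emit_trace(const struct toy_task *p)
{
	printf("pid=%d vruntime=%llu avg_overlap=%llu\n",
	       p->pid, p->vruntime, p->avg_overlap);
}

int main(void)
{
	/* toy runqueue: already sorted by vruntime, as the rbtree keeps it */
	struct toy_task rq[] = {
		{ .pid = 101, .vruntime = 1200, .avg_overlap =  50 },
		{ .pid = 102, .vruntime = 1900, .avg_overlap = 300 },
		{ .pid = 103, .vruntime = 2400, .avg_overlap =  10 },
	};

	/* analogue of: for (curr = first_fair(...); curr; curr = rb_next(curr)) */
	for (size_t i = 0; i < sizeof(rq) / sizeof(rq[0]); i++)
		emit_trace(&rq[i]);

	return 0;
}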
kernel/sched_fair.c  +0 −3

@@ -1061,8 +1061,6 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	if (!(this_sd->flags & SD_WAKE_AFFINE))
 		return 0;
 
-	ftrace_special(__LINE__, curr->se.avg_overlap, sync);
-	ftrace_special(__LINE__, p->se.avg_overlap, -1);
 	/*
 	 * If the currently running task will sleep within
 	 * a reasonable amount of time then attract this newly
@@ -1240,7 +1238,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	if (unlikely(se == pse))
 		return;
 
-	ftrace_special(__LINE__, p->pid, se->last_wakeup);
 	cfs_rq_of(pse)->next = pse;
 
 	/*
kernel/trace/trace_sched_switch.c  +2 −8

@@ -36,11 +36,8 @@ ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1)) {
+	if (likely(disabled == 1))
 		tracing_sched_switch_trace(tr, data, prev, next, flags);
-		if (trace_flags & TRACE_ITER_SCHED_TREE)
-			ftrace_all_fair_tasks(__rq, tr, data);
-	}
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -65,11 +62,8 @@ wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1)) {
+	if (likely(disabled == 1))
 		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
-		if (trace_flags & TRACE_ITER_SCHED_TREE)
-			ftrace_all_fair_tasks(__rq, tr, data);
-	}
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
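The guard kept by this diff is the atomic_inc_return(&data->disabled) pattern: bump a counter on entry, record the event only at the first nesting level, then drop the counter. Below is a minimal user-space sketch of that pattern under C11 atomics, not the kernel implementation: trace_event() and the plain global counter are invented for illustration (the kernel keeps the counter per CPU and also disables interrupts around the whole sequence).

/* re-entrancy guard sketch: only the outermost call records an event */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int disabled;            /* per-CPU in the kernel; global here */

static void trace_event(const char *what)
{
	/* atomic_fetch_add() returns the old value, so +1 mirrors atomic_inc_return() */
	int level = atomic_fetch_add(&disabled, 1) + 1;

	if (level == 1)                 /* first level in: safe to record */
		printf("trace: %s\n", what);
	/* nested calls (level > 1) are silently dropped */

	atomic_fetch_sub(&disabled, 1);
}

int main(void)
{
	trace_event("context switch"); /* recorded */
	return 0;
}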