kernel/sched/core.c +7 −2

@@ -3322,6 +3322,7 @@ struct preempt_store {
 	u64 ts;
 	unsigned long caddr[4];
 	bool irqs_disabled;
+	bool is_idle_task;
 };
 
 DEFINE_PER_CPU(struct preempt_store, the_ps);
@@ -3331,7 +3332,9 @@ DEFINE_PER_CPU(struct preempt_store, the_ps);
  */
 static inline void preempt_latency_start(int val)
 {
-	struct preempt_store *ps = &per_cpu(the_ps, raw_smp_processor_id());
+	int cpu = raw_smp_processor_id();
+	struct rq *rq = cpu_rq(cpu);
+	struct preempt_store *ps = &per_cpu(the_ps, cpu);
 
 	if (preempt_count() == val) {
 		unsigned long ip = get_lock_parent_ip();
@@ -3344,6 +3347,7 @@ static inline void preempt_latency_start(int val)
 		ps->caddr[2] = CALLER_ADDR2;
 		ps->caddr[3] = CALLER_ADDR3;
 		ps->irqs_disabled = irqs_disabled();
+		ps->is_idle_task = (rq->curr == rq->idle);
 
 		trace_preempt_off(CALLER_ADDR0, ip);
 	}
@@ -3386,7 +3390,8 @@ static inline void preempt_latency_stop(int val)
 		 * Trace preempt disable stack if preemption
 		 * is disabled for more than the threshold.
 		 */
-		if (delta > sysctl_preemptoff_tracing_threshold_ns)
+		if (!ps->is_idle_task &&
+		    delta > sysctl_preemptoff_tracing_threshold_ns)
 			trace_sched_preempt_disable(delta, ps->irqs_disabled,
 						ps->caddr[0], ps->caddr[1],
 						ps->caddr[2], ps->caddr[3]);
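The effect of the core.c change, in brief: preempt_latency_start() now snapshots whether the CPU was running its idle task when preemption got disabled, and preempt_latency_stop() suppresses the trace event for windows that began in the idle loop, presumably because long preempt-off stretches there are expected and would only add noise. Below is a minimal userspace sketch of that filter logic, not kernel code: struct preempt_store is reduced to the fields used here, and tracing_threshold_ns is a stand-in for sysctl_preemptoff_tracing_threshold_ns.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct preempt_store {
	uint64_t ts;            /* sched_clock() at preempt-disable time */
	bool irqs_disabled;
	bool is_idle_task;      /* did the window start in the idle task? */
};

/* Stand-in for sysctl_preemptoff_tracing_threshold_ns: 1 ms. */
static const uint64_t tracing_threshold_ns = 1000000;

/* Mirrors the new condition in preempt_latency_stop(): emit the event
 * only for long windows that did not begin in the idle loop. */
static bool should_trace(const struct preempt_store *ps, uint64_t now_ns)
{
	uint64_t delta = now_ns - ps->ts;

	return !ps->is_idle_task && delta > tracing_threshold_ns;
}

int main(void)
{
	struct preempt_store busy = { .ts = 0, .is_idle_task = false };
	struct preempt_store idle = { .ts = 0, .is_idle_task = true };

	/* Both windows are 2 ms, i.e. over the threshold... */
	printf("busy task: %s\n", should_trace(&busy, 2000000) ? "trace" : "skip");
	/* ...but the one that started in the idle task is filtered. */
	printf("idle task: %s\n", should_trace(&idle, 2000000) ? "trace" : "skip");
	return 0;
}

One likely reason the flag is captured at disable time rather than checked at enable time: context switches happen inside a preempt-disabled region, so the task running when preempt_latency_stop() fires can differ from the one that opened the window.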
kernel/trace/trace_irqsoff.c +3 −1

@@ -14,6 +14,7 @@
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/ftrace.h>
+#include <linux/sched.h>
 #include <linux/sched/clock.h>
 #include <linux/sched/sysctl.h>
 
@@ -636,7 +637,8 @@ void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
 	is = &per_cpu(the_irqsoff, raw_smp_processor_id());
 	delta = sched_clock() - is->ts;
 
-	if (delta > sysctl_irqsoff_tracing_threshold_ns)
+	if (!is_idle_task(current) &&
+	    delta > sysctl_irqsoff_tracing_threshold_ns)
 		trace_irqs_disable(delta, is->caddr[0], is->caddr[1],
 				is->caddr[2], is->caddr[3]);
 	lockdep_on();
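The irqsoff path takes a shortcut the preempt path does not: it checks is_idle_task(current) at the moment interrupts come back on, instead of storing a flag when they were disabled, which keeps the per-CPU store untouched. is_idle_task() lives in <linux/sched.h>, hence the new include. The sketch below mocks that helper with a PF_IDLE-style task flag; PF_IDLE_MOCK and struct task are stand-in names for illustration, not the kernel definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PF_IDLE_MOCK 0x00000002u  /* stand-in for the kernel's PF_IDLE flag */

struct task {
	unsigned int flags;
};

/* Same shape as the kernel helper: a task counts as idle if it
 * carries the idle flag, as each CPU's swapper task does. */
static bool is_idle_task(const struct task *p)
{
	return p->flags & PF_IDLE_MOCK;
}

/* Stand-in for sysctl_irqsoff_tracing_threshold_ns: 1 ms. */
static const uint64_t irqsoff_threshold_ns = 1000000;

/* Mirrors the new condition in tracer_hardirqs_on(): the check runs
 * against whichever task is current when irqs are re-enabled. */
static bool should_trace_irqsoff(const struct task *curr,
				 uint64_t ts_ns, uint64_t now_ns)
{
	return !is_idle_task(curr) && (now_ns - ts_ns) > irqsoff_threshold_ns;
}

int main(void)
{
	struct task worker  = { .flags = 0 };
	struct task swapper = { .flags = PF_IDLE_MOCK };

	printf("worker:  %s\n",
	       should_trace_irqsoff(&worker, 0, 2000000) ? "trace" : "skip");
	printf("swapper: %s\n",
	       should_trace_irqsoff(&swapper, 0, 2000000) ? "trace" : "skip");
	return 0;
}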