Loading include/linux/sched/sysctl.h +1 −0 Original line number Diff line number Diff line Loading @@ -51,6 +51,7 @@ walt_proc_update_handler(struct ctl_table *table, int write, #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) || \ defined(CONFIG_PREEMPTIRQ_EVENTS) extern unsigned int sysctl_preemptoff_tracing_threshold_ns; extern unsigned int sysctl_irqsoff_tracing_threshold_ns; #endif Loading include/trace/events/preemptirq.h +28 −0 Original line number Diff line number Diff line Loading @@ -52,6 +52,34 @@ DEFINE_EVENT(preemptirq_template, preempt_enable, TP_ARGS(ip, parent_ip)); #endif TRACE_EVENT(irqs_disable, TP_PROTO(u64 delta, unsigned long caddr0, unsigned long caddr1, unsigned long caddr2, unsigned long caddr3), TP_ARGS(delta, caddr0, caddr1, caddr2, caddr3), TP_STRUCT__entry( __field(u64, delta) __field(void*, caddr0) __field(void*, caddr1) __field(void*, caddr2) __field(void*, caddr3) ), TP_fast_assign( __entry->delta = delta; __entry->caddr0 = (void *)caddr0; __entry->caddr1 = (void *)caddr1; __entry->caddr2 = (void *)caddr2; __entry->caddr3 = (void *)caddr3; ), TP_printk("delta=%llu(ns) Callers:(%pf<-%pf<-%pf<-%pf)", __entry->delta, __entry->caddr0, __entry->caddr1, __entry->caddr2, __entry->caddr3) ); #endif /* _TRACE_PREEMPTIRQ_H */ #include <trace/define_trace.h> Loading kernel/sysctl.c +7 −0 Original line number Diff line number Diff line Loading @@ -319,6 +319,13 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "irqsoff_tracing_threshold_ns", .data = &sysctl_irqsoff_tracing_threshold_ns, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_SCHED_WALT { Loading kernel/trace/trace_irqsoff.c +44 −0 Original line number Diff line number Diff line Loading @@ -13,6 +13,8 @@ #include <linux/uaccess.h> #include <linux/module.h> #include <linux/ftrace.h> #include <linux/sched/clock.h> #include <linux/sched/sysctl.h> #include "trace.h" 
Loading @@ -39,6 +41,12 @@ static int save_flags; static void stop_irqsoff_tracer(struct trace_array *tr, int graph); static int start_irqsoff_tracer(struct trace_array *tr, int graph); /* * irqsoff stack tracing threshold in ns. * default: 1ms */ unsigned int sysctl_irqsoff_tracing_threshold_ns = 1000000UL; #ifdef CONFIG_PREEMPT_TRACER static inline int preempt_trace(void) Loading Loading @@ -466,17 +474,53 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1) #else /* !CONFIG_PROVE_LOCKING */ #ifdef CONFIG_PREEMPTIRQ_EVENTS struct irqsoff_store { u64 ts; unsigned long caddr[4]; }; DEFINE_PER_CPU(struct irqsoff_store, the_irqsoff); #endif /* CONFIG_PREEMPTIRQ_EVENTS */ /* * We are only interested in hardirq on/off events: */ static inline void tracer_hardirqs_on(void) { #ifdef CONFIG_PREEMPTIRQ_EVENTS struct irqsoff_store *is = &per_cpu(the_irqsoff, raw_smp_processor_id()); if (!is->ts) { is->ts = sched_clock(); is->caddr[0] = CALLER_ADDR0; is->caddr[1] = CALLER_ADDR1; is->caddr[2] = CALLER_ADDR2; is->caddr[3] = CALLER_ADDR3; } #endif /* CONFIG_PREEMPTIRQ_EVENTS */ if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } static inline void tracer_hardirqs_off(void) { #ifdef CONFIG_PREEMPTIRQ_EVENTS struct irqsoff_store *is = &per_cpu(the_irqsoff, raw_smp_processor_id()); u64 delta = 0; if (is->ts) { delta = sched_clock() - is->ts; is->ts = 0; } if (delta > sysctl_irqsoff_tracing_threshold_ns) trace_irqs_disable(delta, is->caddr[0], is->caddr[1], is->caddr[2], is->caddr[3]); #endif /* CONFIG_PREEMPTIRQ_EVENTS */ if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } Loading Loading
/* include/linux/sched/sysctl.h */

/*
 * Thresholds (ns) above which preempt-off / irqs-off sections are
 * reported via the preemptirq trace events; tunable via /proc/sys.
 */
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) || \
	defined(CONFIG_PREEMPTIRQ_EVENTS)
extern unsigned int sysctl_preemptoff_tracing_threshold_ns;
extern unsigned int sysctl_irqsoff_tracing_threshold_ns;
#endif
include/trace/events/preemptirq.h +28 −0 Original line number Diff line number Diff line Loading @@ -52,6 +52,34 @@ DEFINE_EVENT(preemptirq_template, preempt_enable, TP_ARGS(ip, parent_ip)); #endif TRACE_EVENT(irqs_disable, TP_PROTO(u64 delta, unsigned long caddr0, unsigned long caddr1, unsigned long caddr2, unsigned long caddr3), TP_ARGS(delta, caddr0, caddr1, caddr2, caddr3), TP_STRUCT__entry( __field(u64, delta) __field(void*, caddr0) __field(void*, caddr1) __field(void*, caddr2) __field(void*, caddr3) ), TP_fast_assign( __entry->delta = delta; __entry->caddr0 = (void *)caddr0; __entry->caddr1 = (void *)caddr1; __entry->caddr2 = (void *)caddr2; __entry->caddr3 = (void *)caddr3; ), TP_printk("delta=%llu(ns) Callers:(%pf<-%pf<-%pf<-%pf)", __entry->delta, __entry->caddr0, __entry->caddr1, __entry->caddr2, __entry->caddr3) ); #endif /* _TRACE_PREEMPTIRQ_H */ #include <trace/define_trace.h> Loading
kernel/sysctl.c +7 −0 Original line number Diff line number Diff line Loading @@ -319,6 +319,13 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "irqsoff_tracing_threshold_ns", .data = &sysctl_irqsoff_tracing_threshold_ns, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_SCHED_WALT { Loading
kernel/trace/trace_irqsoff.c +44 −0 Original line number Diff line number Diff line Loading @@ -13,6 +13,8 @@ #include <linux/uaccess.h> #include <linux/module.h> #include <linux/ftrace.h> #include <linux/sched/clock.h> #include <linux/sched/sysctl.h> #include "trace.h" Loading @@ -39,6 +41,12 @@ static int save_flags; static void stop_irqsoff_tracer(struct trace_array *tr, int graph); static int start_irqsoff_tracer(struct trace_array *tr, int graph); /* * irqsoff stack tracing threshold in ns. * default: 1ms */ unsigned int sysctl_irqsoff_tracing_threshold_ns = 1000000UL; #ifdef CONFIG_PREEMPT_TRACER static inline int preempt_trace(void) Loading Loading @@ -466,17 +474,53 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1) #else /* !CONFIG_PROVE_LOCKING */ #ifdef CONFIG_PREEMPTIRQ_EVENTS struct irqsoff_store { u64 ts; unsigned long caddr[4]; }; DEFINE_PER_CPU(struct irqsoff_store, the_irqsoff); #endif /* CONFIG_PREEMPTIRQ_EVENTS */ /* * We are only interested in hardirq on/off events: */ static inline void tracer_hardirqs_on(void) { #ifdef CONFIG_PREEMPTIRQ_EVENTS struct irqsoff_store *is = &per_cpu(the_irqsoff, raw_smp_processor_id()); if (!is->ts) { is->ts = sched_clock(); is->caddr[0] = CALLER_ADDR0; is->caddr[1] = CALLER_ADDR1; is->caddr[2] = CALLER_ADDR2; is->caddr[3] = CALLER_ADDR3; } #endif /* CONFIG_PREEMPTIRQ_EVENTS */ if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } static inline void tracer_hardirqs_off(void) { #ifdef CONFIG_PREEMPTIRQ_EVENTS struct irqsoff_store *is = &per_cpu(the_irqsoff, raw_smp_processor_id()); u64 delta = 0; if (is->ts) { delta = sched_clock() - is->ts; is->ts = 0; } if (delta > sysctl_irqsoff_tracing_threshold_ns) trace_irqs_disable(delta, is->caddr[0], is->caddr[1], is->caddr[2], is->caddr[3]); #endif /* CONFIG_PREEMPTIRQ_EVENTS */ if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } Loading