kernel/trace/Kconfig (+2 −0)

--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -52,6 +52,7 @@ config FUNCTION_TRACER
 	depends on HAVE_FUNCTION_TRACER
 	depends on DEBUG_KERNEL
 	select FRAME_POINTER
+	select KALLSYMS
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
 	help
@@ -238,6 +239,7 @@ config STACK_TRACER
 	depends on DEBUG_KERNEL
 	select FUNCTION_TRACER
 	select STACKTRACE
+	select KALLSYMS
 	help
 	  This special tracer records the maximum stack footprint of the
 	  kernel and displays it in debugfs/tracing/stack_trace.
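Both tracers now select KALLSYMS because their output resolves raw instruction pointers into human-readable symbol names, which requires the kernel symbol table to be built in. A minimal sketch of the kind of lookup this enables — sprint_symbol() and KSYM_SYMBOL_LEN are the real kallsyms API, but the helper below is a made-up illustration, not the tracer's actual print path:

/* Illustrative only: resolve an instruction pointer via kallsyms. */
#include <linux/kallsyms.h>
#include <linux/kernel.h>

static void example_print_ip(unsigned long ip)
{
	char buf[KSYM_SYMBOL_LEN];

	/*
	 * Formats e.g. "schedule+0x10/0x90"; degrades to a raw
	 * address when the symbol cannot be resolved.
	 */
	sprint_symbol(buf, ip);
	printk(KERN_INFO "traced: %s\n", buf);
}

Without CONFIG_KALLSYMS the trace and stack_trace output would be bare hex addresses, so selecting it here keeps the tracers useful out of the box.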
kernel/trace/trace_selftest.c (+19 −0)

--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -23,10 +23,20 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer_event *event;
 	struct trace_entry *entry;
+	unsigned int loops = 0;
 
 	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
 		entry = ring_buffer_event_data(event);
 
+		/*
+		 * The ring buffer is a size of trace_buf_size, if
+		 * we loop more than the size, there's something wrong
+		 * with the ring buffer.
+		 */
+		if (loops++ > trace_buf_size) {
+			printk(KERN_CONT ".. bad ring buffer ");
+			goto failed;
+		}
 		if (!trace_valid_entry(entry)) {
 			printk(KERN_CONT ".. invalid entry %d ",
 				entry->type);
@@ -57,11 +67,20 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 
 	cnt = ring_buffer_entries(tr->buffer);
 
+	/*
+	 * The trace_test_buffer_cpu runs a while loop to consume all data.
+	 * If the calling tracer is broken, and is constantly filling
+	 * the buffer, this will run forever, and hard lock the box.
+	 * We disable the ring buffer while we do this test to prevent
+	 * a hard lock up.
+	 */
+	tracing_off();
 	for_each_possible_cpu(cpu) {
 		ret = trace_test_buffer_cpu(tr, cpu);
 		if (ret)
 			break;
 	}
+	tracing_on();
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_restore(flags);
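The two additions above work together: the per-cpu consume loop is now bounded by trace_buf_size, so a corrupt ring buffer cannot spin the selftest forever, and the tracing_off()/tracing_on() pair quiesces the writer around the whole pass, so a broken tracer that keeps refilling the buffer cannot turn the drain into a hard lockup. A minimal userspace model of the bounded-drain idea — every name and type here is invented for illustration, and the kernel's real ring_buffer API differs:

#include <stdio.h>

#define CAPACITY 8			/* stands in for trace_buf_size */

struct ring {
	int data[CAPACITY];
	unsigned int head, tail;	/* consume at head, fill at tail */
};

/* Pop one entry if available; returns 0 when the ring is empty. */
static int consume_one(struct ring *r, int *out)
{
	if (r->head == r->tail)
		return 0;
	*out = r->data[r->head++ % CAPACITY];
	return 1;
}

/*
 * Drain the ring, but never loop more times than the ring can hold:
 * any extra iteration means a writer is still filling it behind us,
 * or the indices are corrupt, so fail instead of hanging.
 */
static int drain(struct ring *r)
{
	unsigned int loops = 0;
	int v;

	while (consume_one(r, &v)) {
		if (loops++ > CAPACITY) {
			fprintf(stderr, ".. bad ring buffer\n");
			return -1;
		}
		printf("entry %d\n", v);
	}
	return 0;
}

int main(void)
{
	struct ring r = { .tail = 3, .data = { 10, 20, 30 } };

	return drain(&r) ? 1 : 0;
}

Stopping the producer first is what makes the bound meaningful: with the writer off, the number of consumable events can only shrink, so exceeding the capacity really does indicate a broken buffer rather than just a busy tracer.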