Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2ee5b92a authored by Steven Rostedt (VMware)
Browse files

tracing: Update stack trace skipping for ORC unwinder



With the addition of ORC unwinder and FRAME POINTER unwinder, the stack
trace skipping requirements have changed.

I went through the tracing stack trace dumps with ORC and with frame
pointers and recalculated the proper values.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent 6be7fa3c
Loading
Loading
Loading
Loading
+20 −14
Original line number Diff line number Diff line
@@ -2374,6 +2374,15 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);

/*
 * Skip 3:
 *
 *   trace_buffer_unlock_commit_regs()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
*/
# define STACK_SKIP 3

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
@@ -2383,16 +2392,12 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
	__buffer_unlock_commit(buffer, event);

	/*
	 * If regs is not set, then skip the following callers:
	 *   trace_buffer_unlock_commit_regs
	 *   event_trigger_unlock_commit
	 *   trace_event_buffer_commit
	 *   trace_event_raw_event_sched_switch
	 * If regs is not set, then skip the necessary functions.
	 * Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are that meaningful.
	 * two. They are not that meaningful.
	 */
	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}

@@ -2579,11 +2584,13 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
	trace.skip		= skip;

	/*
	 * Add two, for this function and the call to save_stack_trace()
	 * Add one, for this function and the call to save_stack_trace()
	 * If regs is set, then these functions will not be in the way.
	 */
#ifndef CONFIG_UNWINDER_ORC
	if (!regs)
		trace.skip += 2;
		trace.skip++;
#endif

	/*
	 * Since events can happen in NMIs there's no safe way to
@@ -2711,11 +2718,10 @@ void trace_dump_stack(int skip)

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	skip += 3;
#ifndef CONFIG_UNWINDER_ORC
	/* Skip 1 to skip this function. */
	skip++;
#endif
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
+11 −2
Original line number Diff line number Diff line
@@ -1123,13 +1123,22 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */

#ifdef CONFIG_STACKTRACE
#ifdef CONFIG_UNWINDER_ORC
/* Skip 2:
 *   event_triggers_post_call()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 2
#else
/*
 * Skip 3:
 * Skip 4:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 3
#define STACK_SKIP 4
#endif

static void
stacktrace_trigger(struct event_trigger_data *data, void *rec)
+36 −13
Original line number Diff line number Diff line
@@ -154,6 +154,24 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
	preempt_enable_notrace();
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -180,15 +198,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
		__trace_stack(tr, flags, STACK_SKIP, pc);
	}

	atomic_dec(&data->disabled);
@@ -367,14 +377,27 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 4:
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
@@ -384,7 +407,7 @@ static __always_inline void trace_stack(struct trace_array *tr)
	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, STACK_SKIP, pc);
	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}

static void