
Commit f201ae23 authored by Frederic Weisbecker, committed by Ingo Molnar

tracing/function-return-tracer: store return stack into task_struct and allocate it dynamically



Impact: use a deeper function tracing depth safely

Some tests showed that function return tracing needed a greater depth of
function calls. But it could be unsafe to store all these return addresses
on the stack (the per-task array lived in thread_info, which shares the
kernel stack).

So these arrays are now allocated dynamically, hung off each task's
task_struct, and only when the tracer is activated.

Typical scheme when the tracer is activated:
- activation: allocate a return stack for each task already in the global
  task list (see the sketch after this list)
- fork: allocate a return stack for the newly created task
- exit: free the return stack of the exiting task
- idle init: same as fork

I chose a default depth of 50. With it, I no longer see overruns.
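
A minimal sketch of the activation step, the walk over the global task list:
only FTRACE_RETFUNC_DEPTH, FTRACE_RETSTACK_ALLOC_SIZE and the new task_struct
fields are taken from this commit's hunks below; the function name
alloc_retstack_tasklist, the batch-and-retry shape, and the exact locking are
assumptions about the kernel/trace/ftrace.c side of the change, which is not
shown on this page.

/*
 * Sketch (assumption): allocate a batch of return stacks up front with
 * GFP_KERNEL, then hand one to every task that lacks one while holding
 * tasklist_lock for reading (no sleeping allocations under the lock).
 */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i, ret = 0;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	unsigned long flags;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
		if (!ret_stack_list[i]) {
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;	/* more tasks than this batch covers */
			goto unlock;
		}
		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			atomic_set(&t->trace_overrun, 0);
			/* initialize the bookkeeping before publishing */
			barrier();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

Presumably the caller keeps an array of FTRACE_RETSTACK_ALLOC_SIZE pointers
and repeats the pass while it returns -EAGAIN, so every pre-existing task
ends up with a return stack before tracing starts.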

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent a0a70c73
arch/x86/include/asm/ftrace.h +0 −1
@@ -29,7 +29,6 @@ struct dyn_arch_ftrace {
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
-#define FTRACE_RET_STACK_SIZE 20
 
 #ifndef __ASSEMBLY__
 
arch/x86/include/asm/thread_info.h +0 −29
@@ -40,36 +40,8 @@ struct thread_info {
 						*/
 	__u8			supervisor_stack[0];
 #endif
-
-#ifdef CONFIG_FUNCTION_RET_TRACER
-	/* Index of current stored adress in ret_stack */
-	int		curr_ret_stack;
-	/* Stack of return addresses for return function tracing */
-	struct ftrace_ret_stack	ret_stack[FTRACE_RET_STACK_SIZE];
-	/*
-	 * Number of functions that haven't been traced
-	 * because of depth overrun.
-	 */
-	atomic_t	trace_overrun;
-#endif
 };
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-#define INIT_THREAD_INFO(tsk)			\
-{						\
-	.task		= &tsk,			\
-	.exec_domain	= &default_exec_domain,	\
-	.flags		= 0,			\
-	.cpu		= 0,			\
-	.preempt_count	= 1,			\
-	.addr_limit	= KERNEL_DS,		\
-	.restart_block = {			\
-		.fn = do_no_restart_syscall,	\
-	},					\
-	.curr_ret_stack = -1,\
-	.trace_overrun	= ATOMIC_INIT(0)	\
-}
-#else
 #define INIT_THREAD_INFO(tsk)			\
 {						\
 	.task		= &tsk,			\
@@ -82,7 +54,6 @@ struct thread_info {
 		.fn = do_no_restart_syscall,	\
 	},					\
 }
-#endif
 
 #define init_thread_info	(init_thread_union.thread_info)
 #define init_stack		(init_thread_union.stack)
arch/x86/kernel/ftrace.c +15 −14
@@ -350,19 +350,21 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
 				unsigned long func)
 {
 	int index;
-	struct thread_info *ti = current_thread_info();
+
+	if (!current->ret_stack)
+		return -EBUSY;
 
 	/* The return trace stack is full */
-	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) {
-		atomic_inc(&ti->trace_overrun);
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
 		return -EBUSY;
 	}
 
-	index = ++ti->curr_ret_stack;
+	index = ++current->curr_ret_stack;
 	barrier();
-	ti->ret_stack[index].ret = ret;
-	ti->ret_stack[index].func = func;
-	ti->ret_stack[index].calltime = time;
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
 
 	return 0;
 }
@@ -373,13 +375,12 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
 {
 	int index;
 
-	struct thread_info *ti = current_thread_info();
-	index = ti->curr_ret_stack;
-	*ret = ti->ret_stack[index].ret;
-	*func = ti->ret_stack[index].func;
-	*time = ti->ret_stack[index].calltime;
-	*overrun = atomic_read(&ti->trace_overrun);
-	ti->curr_ret_stack--;
+	index = current->curr_ret_stack;
+	*ret = current->ret_stack[index].ret;
+	*func = current->ret_stack[index].func;
+	*time = current->ret_stack[index].calltime;
+	*overrun = atomic_read(&current->trace_overrun);
+	current->curr_ret_stack--;
 }
 
 /*
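
For context, the consumer of this push/pop pair is the return trampoline's C
handler, which pops the saved record and forwards it to the registered
callback. The sketch below is a reconstruction, not a hunk from this commit:
the ftrace_retfunc field names and the cpu_clock() timestamping are
assumptions layered on the pop_return_trace() out-parameters visible above.

/* Sketch: the exit trampoline's C handler. Field names of struct
 * ftrace_retfunc are assumed to mirror pop_return_trace()'s
 * out-parameters; cpu_clock() supplies the return timestamp. */
unsigned long ftrace_return_to_handler(void)
{
	struct ftrace_retfunc trace;

	pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
			 &trace.overrun);
	trace.rettime = cpu_clock(raw_smp_processor_id());
	ftrace_function_return(&trace);

	/* the original return address, restored for the caller */
	return trace.ret;
}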
include/linux/ftrace.h +5 −0
@@ -323,6 +323,8 @@ struct ftrace_retfunc {
 };
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
+#define FTRACE_RETFUNC_DEPTH 50
+#define FTRACE_RETSTACK_ALLOC_SIZE 32
 /* Type of a callback handler of tracing return function */
 typedef void (*trace_function_return_t)(struct ftrace_retfunc *);
 
@@ -330,6 +332,9 @@ extern int register_ftrace_return(trace_function_return_t func);
 /* The current handler in use */
 extern trace_function_return_t ftrace_function_return;
 extern void unregister_ftrace_return(void);
+
+extern void ftrace_retfunc_init_task(struct task_struct *t);
+extern void ftrace_retfunc_exit_task(struct task_struct *t);
 #endif
 
 #endif /* _LINUX_FTRACE_H */
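
The two helpers declared at the end of this hunk carry the fork/exit steps of
the scheme from the commit message. A minimal sketch of their bodies, assuming
a global "tracer active" counter (the name ftrace_retfunc_active is
hypothetical); the real implementation lives in kernel/trace/ftrace.c, which
this page does not show.

/* fork: give the child its own empty return stack, but only while
 * the tracer is active (ftrace_retfunc_active is an assumed flag). */
void ftrace_retfunc_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_retfunc_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				       * sizeof(struct ftrace_ret_stack),
				       GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

/* exit: unpublish the stack before freeing it, so a late trace hit
 * sees a NULL pointer instead of freed memory. */
void ftrace_retfunc_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	barrier();
	kfree(ret_stack);
}

Clearing t->ret_stack before kfree() is presumably what the new
`if (!current->ret_stack) return -EBUSY;` guard in push_return_trace()
pairs with: a function traced during exit simply refuses to push.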
include/linux/sched.h +11 −12
@@ -1352,6 +1352,17 @@ struct task_struct {
 	unsigned long default_timer_slack_ns;
 
 	struct list_head	*scm_work_list;
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	/* Index of current stored adress in ret_stack */
+	int curr_ret_stack;
+	/* Stack of return addresses for return function tracing */
+	struct ftrace_ret_stack	*ret_stack;
+	/*
+	 * Number of functions that haven't been traced
+	 * because of depth overrun.
+	 */
+	atomic_t trace_overrun;
+#endif
 };
 
 /*
@@ -2006,18 +2017,6 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
 {
 	*task_thread_info(p) = *task_thread_info(org);
 	task_thread_info(p)->task = p;
-
-#ifdef CONFIG_FUNCTION_RET_TRACER
-	/*
-	 * When fork() creates a child process, this function is called.
-	 * But the child task may not inherit the return adresses traced
-	 * by the return function tracer because it will directly execute
-	 * in userspace and will not return to kernel functions its parent
-	 * used.
-	 */
-	task_thread_info(p)->curr_ret_stack = -1;
-	atomic_set(&task_thread_info(p)->trace_overrun, 0);
-#endif
 }
 
 static inline unsigned long *end_of_stack(struct task_struct *p)