Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4104d326 authored by Steven Rostedt (Red Hat), committed by Steven Rostedt
Browse files

ftrace: Remove global function list and call function directly



Instead of having a list of global functions that are called,
as only one global function is allowed to be enabled at a time, there's
no reason to have a list.

Instead, simply have all the users of the global ops, use the global ops
directly, instead of registering their own ftrace_ops. Just switch what
function is used before enabling the function tracer.

This removes a lot of code as well as the complexity involved with it.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent a798c10f
Loading
Loading
Loading
Loading
+8 −12
Original line number Original line Diff line number Diff line
@@ -62,9 +62,6 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
 * set in the flags member.
 * set in the flags member.
 *
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * GLOBAL  - set manualy by ftrace_ops user to denote the ftrace_ops
 *           is part of the global tracers sharing the same filter
 *           via set_ftrace_* debugfs files.
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 *           allocated ftrace_ops which need special care
 * CONTROL - set manualy by ftrace_ops user to denote the ftrace_ops
 * CONTROL - set manualy by ftrace_ops user to denote the ftrace_ops
@@ -96,15 +93,14 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
 */
 */
enum {
enum {
	FTRACE_OPS_FL_ENABLED			= 1 << 0,
	FTRACE_OPS_FL_ENABLED			= 1 << 0,
	FTRACE_OPS_FL_GLOBAL			= 1 << 1,
	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
	FTRACE_OPS_FL_DYNAMIC			= 1 << 2,
	FTRACE_OPS_FL_CONTROL			= 1 << 2,
	FTRACE_OPS_FL_CONTROL			= 1 << 3,
	FTRACE_OPS_FL_SAVE_REGS			= 1 << 3,
	FTRACE_OPS_FL_SAVE_REGS			= 1 << 4,
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 4,
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 5,
	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 5,
	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 6,
	FTRACE_OPS_FL_STUB			= 1 << 6,
	FTRACE_OPS_FL_STUB			= 1 << 7,
	FTRACE_OPS_FL_INITIALIZED		= 1 << 7,
	FTRACE_OPS_FL_INITIALIZED		= 1 << 8,
	FTRACE_OPS_FL_DELETED			= 1 << 8,
	FTRACE_OPS_FL_DELETED			= 1 << 9,
};
};


/*
/*
+42 −110
Original line number Original line Diff line number Diff line
@@ -62,7 +62,7 @@
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12
#define FTRACE_HASH_MAX_BITS 12


#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)


#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_REGEX_LOCK(opsname)	\
#define INIT_REGEX_LOCK(opsname)	\
@@ -103,7 +103,6 @@ static int ftrace_disabled __read_mostly;


static DEFINE_MUTEX(ftrace_lock);
static DEFINE_MUTEX(ftrace_lock);


static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
@@ -171,23 +170,6 @@ int ftrace_nr_registered_ops(void)
	return cnt;
	return cnt;
}
}


/*
 * ftrace_global_list_func - callback that fans out to every ops on the
 * global list (removed by this commit).
 *
 * Guards against recursive entry using the TRACE_GLOBAL_* recursion bits,
 * then walks ftrace_global_list and invokes each registered op's ->func
 * with the traced ip/parent_ip and the saved registers.
 */
static void
ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	int bit;

	/* Bail out if we are already tracing in this context (recursion). */
	bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
	if (bit < 0)
		return;

	/* Call every function registered on the global ops list. */
	do_for_each_ftrace_op(op, ftrace_global_list) {
		op->func(ip, parent_ip, op, regs);
	} while_for_each_ftrace_op(op);

	trace_clear_recursion(bit);
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
			    struct ftrace_ops *op, struct pt_regs *regs)
{
{
@@ -237,43 +219,6 @@ static int control_ops_alloc(struct ftrace_ops *ops)
	return 0;
	return 0;
}
}


/*
 * update_global_ops - recompute global_ops.func/.private (removed by this
 * commit).
 *
 * Defaults to the list-walking ftrace_global_list_func, but if zero or one
 * ops is on ftrace_global_list, calls that single op's ->func directly and
 * propagates its ->private.  Also swaps in ftrace_pid_func when pid
 * filtering is active.
 */
static void update_global_ops(void)
{
	ftrace_func_t func = ftrace_global_list_func;
	void *private = NULL;

	/* The list has its own recursion protection. */
	global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;

	/*
	 * If there's only one function registered, then call that
	 * function directly. Otherwise, we need to iterate over the
	 * registered callers.
	 */
	if (ftrace_global_list == &ftrace_list_end ||
	    ftrace_global_list->next == &ftrace_list_end) {
		func = ftrace_global_list->func;
		private = ftrace_global_list->private;
		/*
		 * As we are calling the function directly.
		 * If it does not have recursion protection,
		 * the function_trace_op needs to be updated
		 * accordingly.
		 */
		if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE))
			global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
	}

	/* If we filter on pids, update to use the pid function */
	if (!list_empty(&ftrace_pids)) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	}

	global_ops.func = func;
	global_ops.private = private;
}

static void ftrace_sync(struct work_struct *work)
static void ftrace_sync(struct work_struct *work)
{
{
	/*
	/*
@@ -301,8 +246,6 @@ static void update_ftrace_function(void)
{
{
	ftrace_func_t func;
	ftrace_func_t func;


	update_global_ops();

	/*
	/*
	 * If we are at the end of the list and this ops is
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * recursion safe and not dynamic and the arch supports passing ops,
@@ -314,9 +257,6 @@ static void update_ftrace_function(void)
	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
	     !FTRACE_FORCE_LIST_FUNC)) {
	     !FTRACE_FORCE_LIST_FUNC)) {
		/* Set the ftrace_ops that the arch callback uses */
		/* Set the ftrace_ops that the arch callback uses */
		if (ftrace_ops_list == &global_ops)
			set_function_trace_op = ftrace_global_list;
		else
		set_function_trace_op = ftrace_ops_list;
		set_function_trace_op = ftrace_ops_list;
		func = ftrace_ops_list->func;
		func = ftrace_ops_list->func;
	} else {
	} else {
@@ -434,16 +374,9 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
	if (ops->flags & FTRACE_OPS_FL_DELETED)
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;
		return -EINVAL;


	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;
		return -EBUSY;


	/* We don't support both control and global flags set. */
	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
		return -EINVAL;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
@@ -461,10 +394,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
	if (!core_kernel_data((unsigned long)ops))
	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;


	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
		ops->flags |= FTRACE_OPS_FL_ENABLED;
	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		if (control_ops_alloc(ops))
		if (control_ops_alloc(ops))
			return -ENOMEM;
			return -ENOMEM;
		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
@@ -484,15 +414,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;
		return -EBUSY;


	if (FTRACE_WARN_ON(ops == &global_ops))
	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ret = remove_ftrace_list_ops(&ftrace_global_list,
					     &global_ops, ops);
		if (!ret)
			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		ret = remove_ftrace_list_ops(&ftrace_control_list,
		ret = remove_ftrace_list_ops(&ftrace_control_list,
					     &control_ops, ops);
					     &control_ops, ops);
	} else
	} else
@@ -2128,15 +2050,6 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
	ftrace_start_up++;
	ftrace_start_up++;
	command |= FTRACE_UPDATE_CALLS;
	command |= FTRACE_UPDATE_CALLS;


	/* ops marked global share the filter hashes */
	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		/* Don't update hash if global is already set */
		if (global_start_up)
			hash_enable = false;
		global_start_up++;
	}

	ops->flags |= FTRACE_OPS_FL_ENABLED;
	ops->flags |= FTRACE_OPS_FL_ENABLED;
	if (hash_enable)
	if (hash_enable)
		ftrace_hash_rec_enable(ops, 1);
		ftrace_hash_rec_enable(ops, 1);
@@ -2166,21 +2079,10 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
	 */
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);
	WARN_ON_ONCE(ftrace_start_up < 0);


	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		global_start_up--;
		WARN_ON_ONCE(global_start_up < 0);
		/* Don't update hash if global still has users */
		if (global_start_up) {
			WARN_ON_ONCE(!ftrace_start_up);
			hash_disable = false;
		}
	}

	if (hash_disable)
	if (hash_disable)
		ftrace_hash_rec_disable(ops, 1);
		ftrace_hash_rec_disable(ops, 1);


	if (ops != &global_ops || !global_start_up)
	if (!global_start_up)
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;


	command |= FTRACE_UPDATE_CALLS;
	command |= FTRACE_UPDATE_CALLS;
@@ -3524,10 +3426,6 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
	struct ftrace_hash *hash;
	struct ftrace_hash *hash;
	int ret;
	int ret;


	/* All global ops uses the global ops filters */
	if (ops->flags & FTRACE_OPS_FL_GLOBAL)
		ops = &global_ops;

	if (unlikely(ftrace_disabled))
	if (unlikely(ftrace_disabled))
		return -ENODEV;
		return -ENODEV;


@@ -4462,6 +4360,34 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)


#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_DYNAMIC_FTRACE */


/*
 * ftrace_init_global_array_ops - attach the shared global_ops to the
 * top-level trace_array (added by this commit; called once at boot from
 * tracer_alloc_buffers()).
 */
__init void ftrace_init_global_array_ops(struct trace_array *tr)
{
	tr->ops = &global_ops;
	tr->ops->private = tr;
}

/*
 * ftrace_init_array_ops - install @func as the callback for @tr's ops
 * (added by this commit; replaces per-tracer global ftrace_ops).
 *
 * For the top-level (global) trace_array, warns if a previous tracer left
 * its callback installed (it should have been reset to ftrace_stub), and
 * substitutes ftrace_pid_func when pid filtering is active.
 */
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
{
	/* If we filter on pids, update to use the pid function */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		/* A leftover non-stub func means a tracer forgot to reset. */
		if (WARN_ON(tr->ops->func != ftrace_stub))
			printk("ftrace ops had %pS for function\n",
			       tr->ops->func);
		/* Only the top level instance does pid tracing */
		if (!list_empty(&ftrace_pids)) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}
	}
	tr->ops->func = func;
	tr->ops->private = tr;
}

/*
 * ftrace_reset_array_ops - restore @tr's ops callback to the no-op stub
 * (added by this commit; pairs with ftrace_init_array_ops()).
 */
void ftrace_reset_array_ops(struct trace_array *tr)
{
	tr->ops->func = ftrace_stub;
}

static void
static void
ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
			struct ftrace_ops *op, struct pt_regs *regs)
@@ -4520,9 +4446,16 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
	 */
	 */
	preempt_disable_notrace();
	preempt_disable_notrace();
	do_for_each_ftrace_op(op, ftrace_ops_list) {
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (ftrace_ops_test(op, ip, regs))
		if (ftrace_ops_test(op, ip, regs)) {
			if (WARN_ON(!op->func)) {
				function_trace_stop = 1;
				printk("op=%p %pS\n", op, op);
				goto out;
			}
			op->func(ip, parent_ip, op, regs);
			op->func(ip, parent_ip, op, regs);
		}
	} while_for_each_ftrace_op(op);
	} while_for_each_ftrace_op(op);
out:
	preempt_enable_notrace();
	preempt_enable_notrace();
	trace_clear_recursion(bit);
	trace_clear_recursion(bit);
}
}
@@ -5076,8 +5009,7 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
/* Just a place holder for function graph */
/* Just a place holder for function graph */
static struct ftrace_ops fgraph_ops __read_mostly = {
static struct ftrace_ops fgraph_ops __read_mostly = {
	.func		= ftrace_stub,
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
	.flags		= FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_RECURSION_SAFE,
				FTRACE_OPS_FL_RECURSION_SAFE,
};
};


static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
+2 −0
Original line number Original line Diff line number Diff line
@@ -6629,6 +6629,8 @@ __init static int tracer_alloc_buffers(void)
	 */
	 */
	global_trace.current_trace = &nop_trace;
	global_trace.current_trace = &nop_trace;


	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);
	register_tracer(&nop_trace);


	/* All seems OK, enable tracing */
	/* All seems OK, enable tracing */
+9 −10
Original line number Original line Diff line number Diff line
@@ -416,13 +416,7 @@ enum {
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,


	/* GLOBAL_BITs must be greater than FTRACE_BITs */
	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_GLOBAL_BIT,
	TRACE_GLOBAL_NMI_BIT,
	TRACE_GLOBAL_IRQ_BIT,
	TRACE_GLOBAL_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than GLOBAL_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_IRQ_BIT,
@@ -449,9 +443,6 @@ enum {
#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)


#define TRACE_GLOBAL_START	TRACE_GLOBAL_BIT
#define TRACE_GLOBAL_MAX	((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)


@@ -823,6 +814,9 @@ extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
#else
#else
static inline int ftrace_trace_task(struct task_struct *task)
static inline int ftrace_trace_task(struct task_struct *task)
{
{
@@ -836,6 +830,11 @@ ftrace_create_function_files(struct trace_array *tr,
	return 0;
	return 0;
}
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
/* ftace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */


#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
+19 −36
Original line number Original line Diff line number Diff line
@@ -26,8 +26,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
static void
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct ftrace_ops trace_ops;
static struct ftrace_ops trace_stack_ops;
static struct tracer_flags func_flags;
static struct tracer_flags func_flags;


/* Our option */
/* Our option */
@@ -83,28 +81,24 @@ void ftrace_destroy_function_files(struct trace_array *tr)


static int function_trace_init(struct trace_array *tr)
static int function_trace_init(struct trace_array *tr)
{
{
	struct ftrace_ops *ops;
	ftrace_func_t func;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		/* There's only one global tr */
		if (!trace_ops.private) {
			trace_ops.private = tr;
			trace_stack_ops.private = tr;
		}


		if (func_flags.val & TRACE_FUNC_OPT_STACK)
			ops = &trace_stack_ops;
		else
			ops = &trace_ops;
		tr->ops = ops;
	} else if (!tr->ops) {
	/*
	/*
	 * Instance trace_arrays get their ops allocated
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * at instance creation. Unless it failed
	 * the allocation.
	 * the allocation.
	 */
	 */
	if (!tr->ops)
		return -ENOMEM;
		return -ENOMEM;
	}

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);


	tr->trace_buffer.cpu = get_cpu();
	tr->trace_buffer.cpu = get_cpu();
	put_cpu();
	put_cpu();
@@ -118,6 +112,7 @@ static void function_trace_reset(struct trace_array *tr)
{
{
	tracing_stop_function_trace(tr);
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}
}


static void function_trace_start(struct trace_array *tr)
static void function_trace_start(struct trace_array *tr)
@@ -199,18 +194,6 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
	local_irq_restore(flags);
	local_irq_restore(flags);
}
}


static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
@@ -248,10 +231,10 @@ func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
		unregister_ftrace_function(tr->ops);
		unregister_ftrace_function(tr->ops);


		if (set) {
		if (set) {
			tr->ops = &trace_stack_ops;
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
			register_ftrace_function(tr->ops);
		} else {
		} else {
			tr->ops = &trace_ops;
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
			register_ftrace_function(tr->ops);
		}
		}


Loading