Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2481a87b authored by Heiko Carstens, committed by Martin Schwidefsky
Browse files

s390/ftrace: optimize function graph caller code



When the function graph tracer is disabled we can skip three additional
instructions. So let's just do this.

So if function tracing is enabled but function graph tracing is
runtime disabled, we get away with a single unconditional branch.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 0f1b1ff5
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -4,6 +4,7 @@
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLY__


extern void _mcount(void);
extern void _mcount(void);
extern char ftrace_graph_caller_end;


struct dyn_arch_ftrace { };
struct dyn_arch_ftrace { };


+24 −0
Original line number Original line Diff line number Diff line
@@ -170,6 +170,29 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
 * directly after the instructions. To enable the call we calculate
 * directly after the instructions. To enable the call we calculate
 * the original offset to prepare_ftrace_return and put it back.
 * the original offset to prepare_ftrace_return and put it back.
 */
 */

#ifdef CONFIG_64BIT

/*
 * Enable the function graph caller by patching the halfword branch
 * offset of the "j" instruction at ftrace_graph_caller + 2.  Writing
 * an offset of 0x0002 (counted in halfwords, i.e. 4 bytes) turns the
 * branch into "j .+4", so execution falls through into the graph
 * tracing code instead of skipping it (see the comment in mcount.S).
 * Returns the result of probe_kernel_write (0 on success).
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	/* 2 halfwords == "branch to the next instruction" ("j .+4") */
	static unsigned short offset = 0x0002;

	return probe_kernel_write((void *) ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}

/*
 * Disable the function graph caller by patching the branch offset of
 * the "j" instruction at ftrace_graph_caller + 2 so that it jumps to
 * ftrace_graph_caller_end, skipping the graph tracing code entirely.
 * The distance is divided by 2 because the branch offset is counted
 * in halfwords (matching the 0x0002 == ".+4" encoding used by the
 * enable path above).
 * Returns the result of probe_kernel_write (0 on success).
 */
int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned short offset;

	/* byte distance to ftrace_graph_caller_end, converted to halfwords */
	offset = ((void *) &ftrace_graph_caller_end -
		  (void *) ftrace_graph_caller) / 2;
	return probe_kernel_write((void *) ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}

#else /* CONFIG_64BIT */

int ftrace_enable_ftrace_graph_caller(void)
int ftrace_enable_ftrace_graph_caller(void)
{
{
	unsigned short offset;
	unsigned short offset;
@@ -188,5 +211,6 @@ int ftrace_disable_ftrace_graph_caller(void)
				  &offset, sizeof(offset));
				  &offset, sizeof(offset));
}
}


#endif /* CONFIG_64BIT */
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+9 −6
Original line number Original line Diff line number Diff line
@@ -32,14 +32,17 @@ ENTRY(ftrace_caller)
	lg	%r14,0(%r14)
	lg	%r14,0(%r14)
	basr	%r14,%r14
	basr	%r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# The j instruction gets runtime patched to a nop instruction.
# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
#	j	.+4
ENTRY(ftrace_graph_caller)
	j	ftrace_graph_caller_end
	lg	%r2,168(%r15)
	lg	%r2,168(%r15)
	lg	%r3,272(%r15)
	lg	%r3,272(%r15)
ENTRY(ftrace_graph_caller)
	brasl	%r14,prepare_ftrace_return
# The bras instruction gets runtime patched to call prepare_ftrace_return.
	stg	%r2,168(%r15)
# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
ftrace_graph_caller_end:
#	bras	%r14,prepare_ftrace_return
	.globl	ftrace_graph_caller_end
	bras	%r14,0f
0:	stg	%r2,168(%r15)
#endif
#endif
	aghi	%r15,160
	aghi	%r15,160
	lmg	%r2,%r5,32(%r15)
	lmg	%r2,%r5,32(%r15)