arch/x86/Kconfig  +1 −1

@@ -29,7 +29,7 @@ config X86
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_GRAPH_TRACER if X86_32
+	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
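The graph tracer was previously selectable only on 32-bit x86. With the 64-bit entry code and the word-size asm fixups added below, the `if X86_32` guard is dropped and HAVE_FUNCTION_GRAPH_TRACER is selected on all of x86.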
arch/x86/kernel/Makefile  +1 −0

@@ -17,6 +17,7 @@ endif

 ifdef CONFIG_FUNCTION_GRAPH_TRACER
 # Don't trace __switch_to() but let it for function tracer
 CFLAGS_REMOVE_process_32.o = -pg
+CFLAGS_REMOVE_process_64.o = -pg
 endif
 #
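process_64.c holds the 64-bit __switch_to(). Mirroring the existing process_32.o rule, it is now built without -pg so no mcount call is emitted there, presumably because the graph tracer's per-task return stack cannot safely follow a function in which the current task changes mid-body.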
arch/x86/kernel/entry_64.S  +74 −0

@@ -98,6 +98,12 @@ ftrace_call:
 	movq (%rsp), %rax
 	addq $0x38, %rsp

+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+	jmp ftrace_stub
+#endif
+
 .globl ftrace_stub
 ftrace_stub:
 	retq
@@ -110,6 +116,12 @@ ENTRY(mcount)
 	cmpq $ftrace_stub, ftrace_trace_function
 	jnz trace

+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	cmpq $ftrace_stub, ftrace_graph_return
+	jnz ftrace_graph_caller
+#endif
+
 .globl ftrace_stub
 ftrace_stub:
 	retq
@@ -145,6 +157,68 @@ END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */

+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+	cmpl $0, function_trace_stop
+	jne ftrace_stub
+
+	subq $0x38, %rsp
+	movq %rax, (%rsp)
+	movq %rcx, 8(%rsp)
+	movq %rdx, 16(%rsp)
+	movq %rsi, 24(%rsp)
+	movq %rdi, 32(%rsp)
+	movq %r8, 40(%rsp)
+	movq %r9, 48(%rsp)
+
+	leaq 8(%rbp), %rdi
+	movq 0x38(%rsp), %rsi
+
+	call prepare_ftrace_return
+
+	movq 48(%rsp), %r9
+	movq 40(%rsp), %r8
+	movq 32(%rsp), %rdi
+	movq 24(%rsp), %rsi
+	movq 16(%rsp), %rdx
+	movq 8(%rsp), %rcx
+	movq (%rsp), %rax
+	addq $0x38, %rsp
+	retq
+END(ftrace_graph_caller)
+
+.globl return_to_handler
+return_to_handler:
+	subq $80, %rsp
+
+	movq %rax, (%rsp)
+	movq %rcx, 8(%rsp)
+	movq %rdx, 16(%rsp)
+	movq %rsi, 24(%rsp)
+	movq %rdi, 32(%rsp)
+	movq %r8, 40(%rsp)
+	movq %r9, 48(%rsp)
+	movq %r10, 56(%rsp)
+	movq %r11, 64(%rsp)
+
+	call ftrace_return_to_handler
+
+	movq %rax, 72(%rsp)
+
+	movq 64(%rsp), %r11
+	movq 56(%rsp), %r10
+	movq 48(%rsp), %r9
+	movq 40(%rsp), %r8
+	movq 32(%rsp), %rdi
+	movq 24(%rsp), %rsi
+	movq 16(%rsp), %rdx
+	movq 8(%rsp), %rcx
+	movq (%rsp), %rax
+	addq $72, %rsp
+	retq
+#endif

 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
 #endif
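What these hunks do, in outline. The first two wire the new path in: the dynamic-ftrace stub gains a patchable ftrace_graph_call site, and static mcount branches to ftrace_graph_caller whenever ftrace_graph_return is no longer the stub. ftrace_graph_caller then spills the SysV argument registers (mcount runs before the traced function body has used them), hands prepare_ftrace_return the address of the parent return slot (8(%rbp)) and the traced function's own address (the mcount call's return address, found at 0x38(%rsp)), and restores everything. return_to_handler runs when the traced function returns: it saves the registers that may hold results, asks ftrace_return_to_handler for the original return address, parks it at 72(%rsp), and after addq $72 the retq pops exactly that slot, so control resumes at the real caller.

A minimal user-space model of that bookkeeping follows; it is a sketch, not kernel code, and every name in it (hook_return, pop_return, ret_stack_entry) is illustrative. hook_return plays the role of prepare_ftrace_return, pop_return the role of ftrace_return_to_handler:

/* Sketch of the return-hooking bookkeeping; all names are made up. */
#include <stdio.h>

#define RET_STACK_DEPTH 50

struct ret_stack_entry {
	unsigned long ret;	/* original return address */
	unsigned long func;	/* traced function */
};

static struct ret_stack_entry ret_stack[RET_STACK_DEPTH];
static int curr_ret_stack = -1;

/* Function entry: save the real return address, then redirect the slot
 * to the trampoline. The kernel does this store with a fault-guarded movq. */
static void hook_return(unsigned long *parent, unsigned long self,
			unsigned long trampoline)
{
	if (curr_ret_stack + 1 >= RET_STACK_DEPTH)
		return;				/* depth exceeded: trace nothing */
	curr_ret_stack++;
	ret_stack[curr_ret_stack].ret = *parent;
	ret_stack[curr_ret_stack].func = self;
	*parent = trampoline;			/* function will "return" into it */
}

/* Function exit, called from the trampoline: log the event and hand the
 * original return address back so the trampoline can jump through it. */
static unsigned long pop_return(void)
{
	struct ret_stack_entry *e = &ret_stack[curr_ret_stack--];

	printf("exit %#lx, resuming at %#lx\n", e->func, e->ret);
	return e->ret;
}

int main(void)
{
	unsigned long slot = 0x400500;		/* fake return-address slot */

	hook_return(&slot, 0x400800, 0xf00);	/* 0xf00: fake trampoline */
	printf("slot now %#lx\n", slot);	/* redirected to the trampoline */
	printf("restored %#lx\n", pop_return());
	return 0;
}

The real code keeps this stack per task (t->ret_stack), which is why the publication ordering fixed in kernel/trace/ftrace.c below matters.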
arch/x86/kernel/ftrace.c  +10 −1

@@ -467,8 +467,13 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	 * ignore such a protection.
 	 */
 	asm volatile(
+#ifdef CONFIG_X86_64
+		"1: movq (%[parent_old]), %[old]\n"
+		"2: movq %[return_hooker], (%[parent_replaced])\n"
+#else
 		"1: movl (%[parent_old]), %[old]\n"
 		"2: movl %[return_hooker], (%[parent_replaced])\n"
+#endif
 		"   movl $0, %[faulted]\n"

 		".section .fixup, \"ax\"\n"
@@ -476,8 +481,13 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		".previous\n"

 		".section __ex_table, \"a\"\n"
+#ifdef CONFIG_X86_64
+		"   .quad 1b, 3b\n"
+		"   .quad 2b, 3b\n"
+#else
 		"   .long 1b, 3b\n"
 		"   .long 2b, 3b\n"
+#endif
 		".previous\n"

 		: [parent_replaced] "=r" (parent), [old] "=r" (old),
@@ -509,5 +519,4 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	ftrace_graph_entry(&trace);
 }
-
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
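The parent return slot may be unwritable (the surrounding comment alludes to page protection), so both the load at label 1 and the store at label 2 are covered by __ex_table entries that redirect a fault to the fixup at label 3, which merely sets faulted. The #ifdefs exist because each table entry holds two addresses and the accesses move a full word: 8 bytes (.quad/movq) on x86-64 versus 4 (.long/movl) on 32-bit. Below is a toy model of the lookup the trap handler performs against such a table; the addresses are made up, and the real kernel keeps this table in a dedicated section:

/* Toy model of the __ex_table lookup; addresses are invented. */
#include <stdio.h>

struct ex_entry {
	unsigned long insn;	/* address of the instruction that may fault */
	unsigned long fixup;	/* where to resume if it does */
};

/* Two entries, mirroring ".quad 1b, 3b" and ".quad 2b, 3b" above. */
static const struct ex_entry ex_table[] = {
	{ 0x1001, 0x3000 },	/* label 1: the guarded load  -> label 3: */
	{ 0x1008, 0x3000 },	/* label 2: the guarded store -> label 3: */
};

/* On a fault, the trap handler searches the table; a hit means "resume
 * at fixup" instead of an oops. */
static unsigned long search_fixup(unsigned long fault_ip)
{
	unsigned i;

	for (i = 0; i < sizeof(ex_table) / sizeof(ex_table[0]); i++)
		if (ex_table[i].insn == fault_ip)
			return ex_table[i].fixup;
	return 0;	/* no entry: the kernel would treat this as a bug */
}

int main(void)
{
	printf("fault at 0x1008 -> fixup %#lx\n", search_fixup(0x1008));
	printf("fault at 0x2222 -> fixup %#lx\n", search_fixup(0x2222));
	return 0;
}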
kernel/trace/ftrace.c  +3 −1

@@ -1671,8 +1671,10 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 		}

 		if (t->ret_stack == NULL) {
-			t->ret_stack = ret_stack_list[start++];
 			t->curr_ret_stack = -1;
+			/* Make sure IRQs see the -1 first: */
+			barrier();
+			t->ret_stack = ret_stack_list[start++];
 			atomic_set(&t->trace_overrun, 0);
 		}
 	} while_each_thread(g, t);
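The reordered stores close a small race: the tracer's entry hook, which can run from an interrupt as soon as tracing is enabled, treats a non-NULL t->ret_stack as "this task has a return stack". If ret_stack were published before curr_ret_stack was set to -1, an IRQ landing between the two stores could index the stack with a stale value. Initializing the index first and separating the stores with barrier() (a compiler barrier, which suffices for ordering against an interrupt on the same CPU) removes that window. A compilable sketch of the pattern, with invented names (task_model, publish_ret_stack):

/* Sketch of the publish ordering; task_model and publish_ret_stack are
 * made up for illustration, barrier() is the usual compiler barrier. */
#include <stdio.h>

#define barrier() __asm__ __volatile__("" : : : "memory")

struct task_model {
	void *ret_stack;	/* the entry hook checks this for NULL */
	int curr_ret_stack;	/* top-of-stack index, -1 when empty */
};

static void publish_ret_stack(struct task_model *t, void *stack)
{
	t->curr_ret_stack = -1;	/* the index must be valid first... */
	barrier();		/* ...so forbid the compiler from sinking this
				 * store below the one that publishes the stack */
	t->ret_stack = stack;	/* an IRQ seeing non-NULL now also sees -1 */
}

int main(void)
{
	struct task_model t = { NULL, 12345 };	/* stale index on purpose */
	char stack[64];

	publish_ret_stack(&t, stack);
	printf("ret_stack=%p curr=%d\n", t.ret_stack, t.curr_ret_stack);
	return 0;
}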