Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1c873be7 authored by Mike Frysinger
Browse files

Blackfin: initial support for ftrace



Just the basic ftrace support here -- mcount and the ftrace stub.

Signed-off-by: Mike Frysinger <vapier@gentoo.org>
parent 6fa68e7a
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -19,6 +19,7 @@ config RWSEM_XCHGADD_ALGORITHM

config BLACKFIN
	def_bool y
	select HAVE_FUNCTION_TRACER
	select HAVE_IDE
	select HAVE_KERNEL_GZIP
	select HAVE_KERNEL_BZIP2
+13 −1
Original line number Diff line number Diff line
/* empty */
/*
 * Blackfin ftrace code
 *
 * Copyright 2009 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#ifndef __ASM_BFIN_FTRACE_H__
#define __ASM_BFIN_FTRACE_H__

/*
 * Size in bytes of the code sequence GCC emits at every function entry
 * to call mcount.  The ftrace core uses this to step back from the
 * address recorded by the call so traces point at the mcount call site
 * itself (see the "r1 += -MCOUNT_INSN_SIZE" adjustment in
 * arch/blackfin/kernel/ftrace-entry.S).
 *
 * NOTE(review): 8 = LINK insn + CALL insn per the comment below;
 * presumably 4 bytes each on Blackfin -- confirm against the ISA manual.
 */
#define MCOUNT_INSN_SIZE	8 /* sizeof mcount call: LINK + CALL */

#endif
+1 −0
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@ else
    obj-y += time.o
endif

obj-$(CONFIG_FUNCTION_TRACER)        += ftrace-entry.o
obj-$(CONFIG_IPIPE)                  += ipipe.o
obj-$(CONFIG_IPIPE_TRACE_MCOUNT)     += mcount.o
obj-$(CONFIG_BFIN_GPTIMERS)          += gptimers.o
+5 −0
Original line number Diff line number Diff line
@@ -103,3 +103,8 @@ EXPORT_SYMBOL(__raw_smp_mark_barrier_asm);
EXPORT_SYMBOL(__raw_smp_check_barrier_asm);
#endif
#endif

#ifdef CONFIG_FUNCTION_TRACER
/*
 * _mcount is implemented in assembly (ftrace-entry.S); declare it here so
 * it can be exported.  GCC inserts calls to mcount in every function when
 * profiling is enabled, so the symbol must be exported for modules to
 * link/load with function tracing active.
 */
extern void _mcount(void);
EXPORT_SYMBOL(_mcount);
#endif
+72 −0
Original line number Diff line number Diff line
/*
 * mcount and friends -- ftrace stuff
 *
 * Entry stub that GCC-profiled functions call on entry.  It forwards
 * (frompc, selfpc) to the currently installed ftrace tracer, or returns
 * as fast as possible when no tracer is registered (tracer == stub).
 *
 * Copyright (C) 2009 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#include <linux/linkage.h>
#include <asm/ftrace.h>

.text

/* GCC will have called us before setting up the function prologue, so we
 * can clobber the normal scratch registers, but we need to make sure to
 * save/restore the registers used for argument passing (R0-R2) in case
 * the profiled function is using them.  With data registers, R3 is the
 * only one we can blow away.  With pointer registers, we have P0-P2.
 *
 * Upon entry, the RETS will point to the top of the current profiled
 * function.  And since GCC setup the frame for us, the previous function
 * will be waiting there.  mmmm pie.
 */
ENTRY(__mcount)
	/* save third function arg early so we can do testing below */
	[--sp] = r2;

	/* load the function pointer to the tracer
	 * (32-bit address loaded as low/high 16-bit halves)
	 */
	p0.l = _ftrace_trace_function;
	p0.h = _ftrace_trace_function;
	r3 = [p0];		/* r3 = currently installed tracer callback */

	/* optional micro optimization: don't call the stub tracer */
	r2.l = _ftrace_stub;
	r2.h = _ftrace_stub;
	cc = r2 == r3;
	if ! cc jump .Ldo_trace;	/* real tracer installed -> trace */

	/* fast path: no tracer; restore r2 and return to the profiled func */
	r2 = [sp++];
	rts;

.Ldo_trace:

	/* save first/second function arg and the return register
	 * (r2 was already saved above; rets must survive the call (p0))
	 */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* setup the tracer function */
	p0 = r3;

	/* tracer(ulong frompc, ulong selfpc):
	 *  frompc: the pc that did the call to ...
	 *  selfpc: ... this location
	 * the selfpc itself will need adjusting for the mcount call
	 */
	r1 = rets;		/* selfpc: return addr into the profiled func */
	/* frompc: profiled function's own return address; per the header
	 * comment GCC has already set up the frame, so it sits in the
	 * frame at fp + 4 -- NOTE(review): offset assumes GCC's Blackfin
	 * frame layout, confirm against the ABI docs.
	 */
	r0 = [fp + 4];
	r1 += -MCOUNT_INSN_SIZE;	/* back up over the LINK + CALL pair */

	/* call the tracer */
	call (p0);

	/* restore state and get out of dodge
	 * (pops mirror the pushes above: rets, r1, r0, then r2)
	 */
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

/* Default no-op tracer.  Doubles as __mcount's epilogue: the restore
 * sequence above falls through to this rts.  The ftrace core compares
 * ftrace_trace_function against this symbol to detect "no tracer".
 */
.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(__mcount)