Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3a659305 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar
Browse files

perf_counter, ftrace: Fix perf_counter integration



Adds a possible second part to the assign argument of TRACE_EVENT().

  TP_perf_assign(
	__perf_count(foo);
	__perf_addr(bar);
  )

Which, when specified, makes the swcounter increment with @foo instead
of the usual 1, and reports @bar for PERF_SAMPLE_ADDR (the data address
associated with the event) when this triggers a counter overflow.

Signed-off-by: default avatarPeter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: default avatarSteven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
parent e3560336
Loading
Loading
Loading
Loading
+85 −25
Original line number Diff line number Diff line
@@ -144,6 +144,9 @@
#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
@@ -345,6 +348,88 @@ static inline int ftrace_get_offsets_##call( \

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_counter support.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 * 	extern void perf_tpcounter_event(int, u64, u64);
 * 	u64 __addr = 0, __count = 1;
 *
 * 	<assign>   <-- here we expand the TP_perf_assign() macro
 *
 * 	perf_tpcounter_event(event_<call>.id, __addr, __count);
 * }
 *
 * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
 * {
 * 	int ret = 0;
 *
 * 	if (!atomic_inc_return(&event_call->profile_count))
 * 		ret = register_trace_<call>(ftrace_profile_<call>);
 *
 * 	return ret;
 * }
 *
 * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
 * {
 * 	if (atomic_add_negative(-1, &event_call->profile_count))
 * 		unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

/*
 * For the profile-probe pass, invert the assign macros: drop the regular
 * TP_fast_assign() body and expand only TP_perf_assign(), whose
 * __perf_addr()/__perf_count() helpers override the defaults
 * (__addr = 0, __count = 1) set up in ftrace_profile_<call>() below.
 */
#undef TP_fast_assign
#define TP_fast_assign(args...)

#undef TP_perf_assign
#define TP_perf_assign(args...) args

/* Record @a as the data address reported for PERF_SAMPLE_ADDR. */
#undef __perf_addr
#define __perf_addr(a) __addr = (a)

/* Increment the software counter by @c instead of the default 1. */
#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT
/*
 * Emit the perf glue for each trace event (see the expanded example in
 * the comment above):
 *  - ftrace_profile_<call>(proto): the tracepoint probe; runs the
 *    TP_perf_assign() block (if any) to override __addr/__count, then
 *    feeds the event into the perf software-counter core.
 *  - ftrace_profile_enable_<call>() / ftrace_profile_disable_<call>():
 *    refcounted (un)registration of the probe on the tracepoint, driven
 *    by event_call->profile_count.
 */
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static void ftrace_profile_##call(proto)				\
{									\
	extern void perf_tpcounter_event(int, u64, u64);		\
	u64 __addr = 0, __count = 1;					\
	{ assign; }							\
	perf_tpcounter_event(event_##call.id, __addr, __count);		\
}									\
									\
static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
{									\
	int ret = 0;							\
									\
	if (!atomic_inc_return(&event_call->profile_count))		\
		ret = register_trace_##call(ftrace_profile_##call);	\
									\
	return ret;							\
}									\
									\
static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
{									\
	if (atomic_add_negative(-1, &event_call->profile_count))	\
		unregister_trace_##call(ftrace_profile_##call);		\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Restore the default expansion of the assign macros so the following
 * stages see TP_fast_assign() again and keep ignoring TP_perf_assign().
 */
#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#endif

/*
 * Stage 4 of the trace events.
 *
@@ -447,28 +532,6 @@ static inline int ftrace_get_offsets_##call( \
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE
/*
 * NOTE(review): this is the pre-change profile glue shown on the removed
 * side of the diff — its ftrace_profile_<call>() still calls the
 * single-argument perf_tpcounter_event(int), with no addr/count support.
 * The enable/disable helpers are identical to the replacement above.
 */
#define _TRACE_PROFILE(call, proto, args)				\
static void ftrace_profile_##call(proto)				\
{									\
	extern void perf_tpcounter_event(int);				\
	perf_tpcounter_event(event_##call.id);				\
}									\
									\
static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
{									\
	int ret = 0;							\
									\
	if (!atomic_inc_return(&event_call->profile_count))		\
		ret = register_trace_##call(ftrace_profile_##call);	\
									\
	return ret;							\
}									\
									\
static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
{									\
	if (atomic_add_negative(-1, &event_call->profile_count))	\
		unregister_trace_##call(ftrace_profile_##call);		\
}

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
@@ -476,7 +539,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE(call, proto, args)
#define _TRACE_PROFILE_INIT(call)
#endif

@@ -502,7 +564,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))			\
									\
static struct ftrace_event_call event_##call;				\
									\
@@ -586,6 +647,5 @@ __attribute__((section("_ftrace_events"))) event_##call = { \

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef _TRACE_PROFILE
#undef _TRACE_PROFILE_INIT
+3 −3
Original line number Diff line number Diff line
@@ -3703,17 +3703,17 @@ static const struct pmu perf_ops_task_clock = {
};

#ifdef CONFIG_EVENT_PROFILE
/*
 * Tracepoint hook into the perf software-counter core.
 *
 * @event_id: trace event id (event_<call>.id) identifying the tracepoint.
 * @addr:     data address to report via PERF_SAMPLE_ADDR (0 if the event
 *            supplied no __perf_addr()).
 * @count:    increment for the software counter (1 unless the event
 *            supplied a __perf_count()).
 *
 * Samples registers from the interrupting context when available,
 * falling back to the current task's saved user registers otherwise.
 */
void perf_tpcounter_event(int event_id, u64 addr, u64 count)
{
	struct perf_sample_data data = {
		.regs = get_irq_regs(),
		.addr = addr,
	};

	if (!data.regs)
		data.regs = task_pt_regs(current);

	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
}
EXPORT_SYMBOL_GPL(perf_tpcounter_event);