
Commit 95d0ad04 authored by Linus Torvalds

Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf_counter: Fix/complete ftrace event records sampling
  perf_counter, ftrace: Fix perf_counter integration
  tracing/filters: Always free pred on filter_add_subsystem_pred() failure
  tracing/filters: Don't use pred on alloc failure
  ring-buffer: Fix memleak in ring_buffer_free()
  tracing: Fix recordmcount.pl to handle sections with only weak functions
  ring-buffer: Fix advance of reader in rb_buffer_peek()
  tracing: do not use functions starting with .L in recordmcount.pl
  ring-buffer: do not disable ring buffer on oops_in_progress
  ring-buffer: fix check of try_to_discard result
parents 413dd876 f413cdb8

include/linux/ftrace_event.h  +3 −1
@@ -89,7 +89,9 @@ enum print_line_t {
 	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
 };
 
-
+void tracing_generic_entry_update(struct trace_entry *entry,
+				  unsigned long flags,
+				  int pc);
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(int type, unsigned long len,
 				  unsigned long flags, int pc);
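
The newly exported tracing_generic_entry_update() lets code outside the ring-buffer write path (here, the perf glue generated in include/trace/ftrace.h below) fill the common header of a trace record before handing it to perf. A minimal userspace model of that pattern; the struct layout and field names are illustrative, not the kernel's exact struct trace_entry:

#include <stdio.h>

/* Model of the common header every trace record starts with
 * (hypothetical layout, not the kernel's). */
struct trace_entry_model {
	unsigned short	type;		/* event id */
	unsigned char	flags;		/* irq state at event time */
	unsigned char	preempt_count;
	int		pid;
};

/* Stand-in for tracing_generic_entry_update(): fill the common
 * fields from the current context. */
static void entry_update_model(struct trace_entry_model *entry,
			       unsigned long flags, int pc)
{
	entry->flags = (unsigned char)flags;
	entry->preempt_count = (unsigned char)pc;
	entry->pid = 1234;	/* would come from the current task in the kernel */
}

int main(void)
{
	struct trace_entry_model e = { .type = 42 };

	entry_update_model(&e, 0x2, 0);
	printf("type=%d pid=%d flags=%#x pc=%d\n",
	       e.type, e.pid, e.flags, e.preempt_count);
	return 0;
}
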

include/linux/perf_counter.h  +8 −1
@@ -121,8 +121,9 @@ enum perf_counter_sample_format {
 	PERF_SAMPLE_CPU				= 1U << 7,
 	PERF_SAMPLE_PERIOD			= 1U << 8,
 	PERF_SAMPLE_STREAM_ID			= 1U << 9,
+	PERF_SAMPLE_TP_RECORD			= 1U << 10,
 
-	PERF_SAMPLE_MAX = 1U << 10,		/* non-ABI */
+	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
 };
 
 /*
@@ -413,6 +414,11 @@ struct perf_callchain_entry {
 	__u64				ip[PERF_MAX_STACK_DEPTH];
 };
 
+struct perf_tracepoint_record {
+	int				size;
+	char				*record;
+};
+
 struct task_struct;
 
 /**
@@ -681,6 +687,7 @@ struct perf_sample_data {
 	struct pt_regs			*regs;
 	u64				addr;
 	u64				period;
+	void				*private;
 };
 
 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
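
PERF_SAMPLE_MAX is a non-ABI sentinel that must stay one bit above the highest defined sample flag, which is why adding PERF_SAMPLE_TP_RECORD at bit 10 bumps it to 1U << 11. A hedged sketch of how such a sentinel can be used to range-check a user-supplied mask; the enum and the check below are illustrative, not the kernel's code:

#include <stdio.h>

enum sample_format {
	SAMPLE_ADDR      = 1U << 0,
	SAMPLE_TP_RECORD = 1U << 1,	/* a newly added flag, as above */
	SAMPLE_MAX       = 1U << 2,	/* non-ABI: always one past the last bit */
};

/* Reject sample_type masks that set bits we don't know about. */
static int sample_type_valid(unsigned long long sample_type)
{
	return (sample_type & ~(unsigned long long)(SAMPLE_MAX - 1)) == 0;
}

int main(void)
{
	printf("%d\n", sample_type_valid(SAMPLE_ADDR | SAMPLE_TP_RECORD));	/* 1 */
	printf("%d\n", sample_type_valid(1U << 5));				/* 0 */
	return 0;
}
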

include/trace/ftrace.h  +147 −25
@@ -144,6 +144,9 @@
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
 static int								\
@@ -345,6 +348,56 @@ static inline int ftrace_get_offsets_##call( \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_counter support.
+ *
+ * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
+ *
+ * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * {
+ * 	int ret = 0;
+ *
+ * 	if (!atomic_inc_return(&event_call->profile_count))
+ * 		ret = register_trace_<call>(ftrace_profile_<call>);
+ *
+ * 	return ret;
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * {
+ * 	if (atomic_add_negative(-1, &event_call->profile_count))
+ * 		unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ */
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+									\
+static void ftrace_profile_##call(proto);				\
+									\
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{									\
+	int ret = 0;							\
+									\
+	if (!atomic_inc_return(&event_call->profile_count))		\
+		ret = register_trace_##call(ftrace_profile_##call);	\
+									\
+	return ret;							\
+}									\
+									\
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{									\
+	if (atomic_add_negative(-1, &event_call->profile_count))	\
+		unregister_trace_##call(ftrace_profile_##call);		\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#endif
+
 /*
  * Stage 4 of the trace events.
  *
@@ -447,28 +500,6 @@ static inline int ftrace_get_offsets_##call( \
 #define TP_FMT(fmt, args...)	fmt "\n", ##args
 
 #ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args)				\
-static void ftrace_profile_##call(proto)				\
-{									\
-	extern void perf_tpcounter_event(int);				\
-	perf_tpcounter_event(event_##call.id);				\
-}									\
-									\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{									\
-	int ret = 0;							\
-									\
-	if (!atomic_inc_return(&event_call->profile_count))		\
-		ret = register_trace_##call(ftrace_profile_##call);	\
-									\
-	return ret;							\
-}									\
-									\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{									\
-	if (atomic_add_negative(-1, &event_call->profile_count))	\
-		unregister_trace_##call(ftrace_profile_##call);		\
-}
 
 #define _TRACE_PROFILE_INIT(call)					\
 	.profile_count = ATOMIC_INIT(-1),				\
@@ -476,7 +507,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 	.profile_disable = ftrace_profile_disable_##call,
 
 #else
-#define _TRACE_PROFILE(call, proto, args)
 #define _TRACE_PROFILE_INIT(call)
 #endif
 
@@ -502,7 +532,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))			\
 									\
 static struct ftrace_event_call event_##call;				\
 									\
@@ -586,6 +615,99 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#undef _TRACE_PROFILE
+/*
+ * Define the insertion callback to profile events
+ *
+ * The job is very similar to ftrace_raw_event_<call> except that we don't
+ * insert into the ring buffer but into a perf counter.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *	struct ftrace_event_call *event_call = &event_<call>;
+ *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ *	struct ftrace_raw_<call> *entry;
+ *	u64 __addr = 0, __count = 1;
+ *	unsigned long irq_flags;
+ *	int __entry_size;
+ *	int __data_size;
+ *	int pc;
+ *
+ *	local_save_flags(irq_flags);
+ *	pc = preempt_count();
+ *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *	__entry_size = ALIGN(__data_size + sizeof(*entry), sizeof(u64));
+ *
+ *	do {
+ *		char raw_data[__entry_size]; <- allocate our sample on the stack
+ *		struct trace_entry *ent;
+ *
+ *		entry = (struct ftrace_raw_<call> *)raw_data;
+ *		ent = &entry->ent;
+ *		tracing_generic_entry_update(ent, irq_flags, pc);
+ *		ent->type = event_call->id;
+ *
+ *		<tstruct> <- compute the dynamic-array offsets
+ *
+ *		<assign>  <- assign the values
+ *
+ *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ *			     __entry_size);  <- submit it to the perf counter
+ *	} while (0);
+ *
+ * }
+ */
+
+#ifdef CONFIG_EVENT_PROFILE
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+static void ftrace_profile_##call(proto)				\
+{									\
+	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+	struct ftrace_event_call *event_call = &event_##call;		\
+	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
+	struct ftrace_raw_##call *entry;				\
+	u64 __addr = 0, __count = 1;					\
+	unsigned long irq_flags;					\
+	int __entry_size;						\
+	int __data_size;						\
+	int pc;								\
+									\
+	local_save_flags(irq_flags);					\
+	pc = preempt_count();						\
+									\
+	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+	__entry_size = ALIGN(__data_size + sizeof(*entry), sizeof(u64));\
+									\
+	do {								\
+		char raw_data[__entry_size];				\
+		struct trace_entry *ent;				\
+									\
+		entry = (struct ftrace_raw_##call *)raw_data;		\
+		ent = &entry->ent;					\
+		tracing_generic_entry_update(ent, irq_flags, pc);	\
+		ent->type = event_call->id;				\
+									\
+		tstruct							\
+									\
+		{ assign; }						\
+									\
+		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+			     __entry_size);				\
+	} while (0);							\
+									\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_EVENT_PROFILE */
+
 #undef _TRACE_PROFILE_INIT
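
The generated ftrace_profile_<call> builds its sample on the stack: a C99 variable-length array sized to the u64-aligned record, the common header at the front, the event payload behind it, and a submit call before the array goes out of scope. A self-contained model of that layout trick; all names below are invented, only the technique matches:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Round up to a multiple of sizeof(u64), like the ALIGN() above. */
#define ALIGN_U64(x)	(((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1))

struct header {
	uint32_t type;
	uint32_t size;
};

/* Stand-in for perf_tpcounter_event(): here we just report the size. */
static void submit(const void *rec, int size)
{
	(void)rec;
	printf("submitting %d bytes\n", size);
}

static void profile_event(uint32_t id, const char *payload)
{
	int data_size = (int)strlen(payload) + 1;
	int entry_size = (int)ALIGN_U64(sizeof(struct header) + data_size);

	{
		/* Sample lives on the stack; a u64 array keeps it aligned. */
		uint64_t raw_data[entry_size / sizeof(uint64_t)];
		struct header *h = (struct header *)raw_data;

		h->type = id;
		h->size = (uint32_t)entry_size;
		memcpy((char *)raw_data + sizeof(*h), payload, (size_t)data_size);
		submit(raw_data, entry_size);	/* hand off before it vanishes */
	}
}

int main(void)
{
	profile_event(7, "hello");
	return 0;
}
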

kernel/perf_counter.c  +19 −3
@@ -2646,6 +2646,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		u64 counter;
 	} group_entry;
 	struct perf_callchain_entry *callchain = NULL;
+	struct perf_tracepoint_record *tp;
 	int callchain_size = 0;
 	u64 time;
 	struct {
@@ -2714,6 +2715,11 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 			header.size += sizeof(u64);
 	}
 
+	if (sample_type & PERF_SAMPLE_TP_RECORD) {
+		tp = data->private;
+		header.size += tp->size;
+	}
+
 	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
 	if (ret)
 		return;
@@ -2777,6 +2783,9 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		}
 	}
 
+	if (sample_type & PERF_SAMPLE_TP_RECORD)
+		perf_output_copy(&handle, tp->record, tp->size);
+
 	perf_output_end(&handle);
 }
 
@@ -3703,17 +3712,24 @@ static const struct pmu perf_ops_task_clock = {
 };
 
 #ifdef CONFIG_EVENT_PROFILE
-void perf_tpcounter_event(int event_id)
+void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
+			  int entry_size)
 {
+	struct perf_tracepoint_record tp = {
+		.size = entry_size,
+		.record = record,
+	};
+
 	struct perf_sample_data data = {
 		.regs = get_irq_regs(),
-		.addr = 0,
+		.addr = addr,
+		.private = &tp,
 	};
 
 	if (!data.regs)
 		data.regs = task_pt_regs(current);
 
-	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
+	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
 }
 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
 
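The output side is two-phase: perf_counter_output() first grows header.size by tp->size, reserves that many bytes with perf_output_begin(), and only then appends the raw record with perf_output_copy(). A userspace sketch of the same reserve-then-copy pattern; the buffer type and helper functions are invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tp_record {
	int		size;
	const char	*record;
};

struct out_handle {
	char	*buf;
	size_t	off;
};

/* Stand-in for perf_output_begin(): reserve the full record up front. */
static int output_begin(struct out_handle *h, size_t total)
{
	h->buf = malloc(total);
	h->off = 0;
	return h->buf ? 0 : -1;
}

/* Stand-in for perf_output_copy(): append into the reservation. */
static void output_copy(struct out_handle *h, const void *data, size_t len)
{
	memcpy(h->buf + h->off, data, len);
	h->off += len;
}

int main(void)
{
	struct tp_record tp = { .size = 6, .record = "hello" };
	struct out_handle h;
	size_t header_size = sizeof(unsigned long long);	/* fixed part */

	header_size += tp.size;		/* grow the size before reserving */
	if (output_begin(&h, header_size))
		return 1;

	unsigned long long fixed = 0xdeadbeef;
	output_copy(&h, &fixed, sizeof(fixed));
	output_copy(&h, tp.record, (size_t)tp.size);	/* raw record rides at the end */

	printf("wrote %zu bytes\n", h.off);
	free(h.buf);
	return 0;
}
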

kernel/trace/ring_buffer.c  +7 −8
@@ -735,6 +735,7 @@ ring_buffer_free(struct ring_buffer *buffer)
 
 	put_online_cpus();
 
+	kfree(buffer->buffers);
 	free_cpumask_var(buffer->cpumask);
 
 	kfree(buffer);
@@ -1785,7 +1786,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 	 */
 	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
 
-	if (!rb_try_to_discard(cpu_buffer, event))
+	if (rb_try_to_discard(cpu_buffer, event))
 		goto out;
 
 	/*
@@ -2383,7 +2384,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 		 * the box. Return the padding, and we will release
 		 * the current locks, and try again.
 		 */
-		rb_advance_reader(cpu_buffer);
 		return event;
 
 	case RINGBUF_TYPE_TIME_EXTEND:
@@ -2486,7 +2486,7 @@ static inline int rb_ok_to_lock(void)
 	 * buffer too. A one time deal is all you get from reading
 	 * the ring buffer from an NMI.
 	 */
-	if (likely(!in_nmi() && !oops_in_progress))
+	if (likely(!in_nmi()))
 		return 1;
 
 	tracing_off_permanent();
@@ -2519,6 +2519,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	if (dolock)
 		spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(buffer, cpu, ts);
+	if (event && event->type_len == RINGBUF_TYPE_PADDING)
+		rb_advance_reader(cpu_buffer);
 	if (dolock)
 		spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
@@ -2590,12 +2592,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 		spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(buffer, cpu, ts);
-	if (!event)
-		goto out_unlock;
-
-	rb_advance_reader(cpu_buffer);
-
+	if (event)
+		rb_advance_reader(cpu_buffer);
 
  out_unlock:
 	if (dolock)
 		spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
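
The common thread of the rb_buffer_peek() changes is that peeking no longer advances the reader: ring_buffer_consume() advances for every event it returns, and ring_buffer_peek() advances only past padding so the next peek yields a real event. A toy model of that split; an array stands in for the ring and all names are invented:

#include <stdio.h>

enum { EV_PADDING = 0, EV_DATA = 1 };

struct event { int type; int val; };

static struct event ring[] = {
	{ EV_PADDING, 0 }, { EV_DATA, 10 }, { EV_DATA, 20 },
};
static int reader;	/* index of the next unread event */

/* Look at the next event without consuming it. */
static struct event *peek(void)
{
	if (reader >= (int)(sizeof(ring) / sizeof(ring[0])))
		return NULL;
	return &ring[reader];
}

static void advance(void)
{
	reader++;
}

/* consume = peek + advance, mirroring ring_buffer_consume(). */
static struct event *consume(void)
{
	struct event *e = peek();

	if (e)
		advance();
	return e;
}

int main(void)
{
	struct event *e;

	/* Like ring_buffer_peek(): step over padding in the caller so the
	 * next peek returns a real event, but never consume real events. */
	e = peek();
	if (e && e->type == EV_PADDING)
		advance();

	while ((e = consume()) != NULL)
		if (e->type == EV_DATA)
			printf("data %d\n", e->val);
	return 0;
}
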