Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 64edbc56 authored by Ingo Molnar's avatar Ingo Molnar
Browse files

Merge branch 'tracing/ftrace' into tracing/core



Merge reason: this mini-topic had outstanding problems that delayed
              its merge, so it does not fast-forward.

Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
parents 43bd1236 0f6ce3de
Loading
Loading
Loading
Loading
+15 −2
Original line number Diff line number Diff line
@@ -751,12 +751,25 @@ and is between 256 and 4096 characters. It is defined in the file
			ia64_pal_cache_flush instead of SAL_CACHE_FLUSH.

	ftrace=[tracer]
			[ftrace] will set and start the specified tracer
			[FTRACE] will set and start the specified tracer
			as early as possible in order to facilitate early
			boot debugging.

	ftrace_dump_on_oops
			[ftrace] will dump the trace buffers on oops.
			[FTRACE] will dump the trace buffers on oops.

	ftrace_filter=[function-list]
			[FTRACE] Limit the functions traced by the function
			tracer at boot up. function-list is a comma separated
			list of functions. This list can be changed at run
			time by the set_ftrace_filter file in the debugfs
			tracing directory. 

	ftrace_notrace=[function-list]
			[FTRACE] Do not trace the functions specified in
			function-list. This list can be changed at run time
			by the set_ftrace_notrace file in the debugfs
			tracing directory.

	gamecon.map[2|3]=
			[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
+1 −1
Original line number Diff line number Diff line
@@ -51,6 +51,7 @@ struct trace_iterator {
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];
	unsigned long		iter_flags;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
@@ -58,7 +59,6 @@ struct trace_iterator {
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;

+12 −12
Original line number Diff line number Diff line
@@ -7,18 +7,18 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM irq

#define softirq_name(sirq) { sirq, #sirq }
#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
#define show_softirq_name(val)			\
	__print_symbolic(val,			\
			 softirq_name(HI_SOFTIRQ),			\
			 softirq_name(TIMER_SOFTIRQ),			\
			 softirq_name(NET_TX_SOFTIRQ),			\
			 softirq_name(NET_RX_SOFTIRQ),			\
			 softirq_name(BLOCK_SOFTIRQ),			\
			 softirq_name(TASKLET_SOFTIRQ),			\
			 softirq_name(SCHED_SOFTIRQ),			\
			 softirq_name(HRTIMER_SOFTIRQ),			\
			 softirq_name(RCU_SOFTIRQ))
			 softirq_name(HI),	\
			 softirq_name(TIMER),	\
			 softirq_name(NET_TX),	\
			 softirq_name(NET_RX),	\
			 softirq_name(BLOCK),	\
			 softirq_name(TASKLET),	\
			 softirq_name(SCHED),	\
			 softirq_name(HRTIMER),	\
			 softirq_name(RCU))

/**
 * irq_handler_entry - called immediately before the irq action handler
+90 −36
Original line number Diff line number Diff line
@@ -18,14 +18,17 @@

#include <linux/ftrace_event.h>

#undef __field
#define __field(type, item)		type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __field
#define __field(type, item)		type	item;
#undef __dynamic_array
#define __dynamic_array(type, item, len) unsigned short __data_loc_##item;

#undef __string
#define __string(item, src)		unsigned short	__str_loc_##item;
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
@@ -35,7 +38,7 @@
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__str_data[0];		\
		char			__data[0];		\
	};							\
	static struct ftrace_event_call event_##name

@@ -47,30 +50,31 @@
 *
 * Include the following:
 *
 * struct ftrace_str_offsets_<call> {
 *	int				<str1>;
 *	int				<str2>;
 * struct ftrace_data_offsets_<call> {
 *	int				<item1>;
 *	int				<item2>;
 *	[...]
 * };
 *
 * The __string() macro will create each int <str>, this is to
 * keep the offset of each string from the beginning of the event
 * once we perform the strlen() of the src strings.
 *
 * The __dynamic_array() macro will create each int <item>, this is
 * to keep the offset of each array from the beginning of the event.
 */

#undef __field
#define __field(type, item);

#undef __array
#define __array(type, item, len)

#undef __field
#define __field(type, item);
#undef __dynamic_array
#define __dynamic_array(type, item, len)	int item;

#undef __string
#define __string(item, src)	int item;
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
	struct ftrace_str_offsets_##call {				\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

@@ -119,8 +123,12 @@
#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + __entry->__data_loc_##field)

#undef __get_str
#define __get_str(field)	((char *)__entry + __entry->__str_loc_##field)
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
@@ -207,16 +215,19 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
	if (!ret)							\
		return 0;

#undef __string
#define __string(item, src)						       \
	ret = trace_seq_printf(s, "\tfield: __str_loc " #item ";\t"	       \
			       "offset:%u;tsize:%u;\n",			       \
#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t"	       \
			       "offset:%u;\tsize:%u;\n",		       \
			       (unsigned int)offsetof(typeof(field),	       \
					__str_loc_##item),		       \
			       (unsigned int)sizeof(field.__str_loc_##item));  \
					__data_loc_##item),		       \
			       (unsigned int)sizeof(field.__data_loc_##item)); \
	if (!ret)							       \
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

@@ -260,11 +271,14 @@ ftrace_format_##call(struct trace_seq *s) \
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\
				offsetof(typeof(field), __data_loc_##item),    \
				 sizeof(field.__data_loc_##item), 0);

#undef __string
#define __string(item, src)						       \
	ret = trace_define_field(event_call, "__str_loc", #item,	       \
				offsetof(typeof(field), __str_loc_##item),     \
				 sizeof(field.__str_loc_##item), 0);
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
@@ -288,6 +302,43 @@ ftrace_define_fields_##call(void) \

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)       \

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)       \
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
@@ -432,15 +483,15 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src)						\
	__str_offsets.item = __str_size +				\
			     offsetof(typeof(*entry), __str_data);	\
	__str_size += strlen(src) + 1;
#define __string(item, src) __dynamic_array(char, item, -1)       	\

#undef __assign_str
#define __assign_str(dst, src)						\
	__entry->__str_loc_##dst = __str_offsets.dst;			\
	strcpy(__get_str(dst), src);

#undef TRACE_EVENT
@@ -451,27 +502,30 @@ static struct ftrace_event_call event_##call; \
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_str_offsets_##call __maybe_unused __str_offsets;	\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	unsigned long irq_flags;					\
	int __str_size = 0;						\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	tstruct;							\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(event_##call.id,	\
				 sizeof(struct ftrace_raw_##call) + __str_size,\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry	= ring_buffer_event_data(event);			\
									\
	assign;								\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(event_call, entry, event))	\
		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
+30 −30
Original line number Diff line number Diff line
@@ -56,6 +56,13 @@ config CONTEXT_SWITCH_TRACER
	select MARKERS
	bool

# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects it. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
# hiding of the automatic options.

config TRACING
	bool
	select DEBUG_FS
@@ -66,6 +73,10 @@ config TRACING
	select BINARY_PRINTF
	select EVENT_TRACING

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
@@ -95,7 +106,7 @@ config FUNCTION_TRACER
	depends on HAVE_FUNCTION_TRACER
	select FRAME_POINTER
	select KALLSYMS
	select TRACING
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  Enable the kernel to trace every kernel function. This is done
@@ -126,7 +137,7 @@ config IRQSOFF_TRACER
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on GENERIC_TIME
	select TRACE_IRQFLAGS
	select TRACING
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  This option measures the time spent in irqs-off critical
@@ -147,7 +158,7 @@ config PREEMPT_TRACER
	default n
	depends on GENERIC_TIME
	depends on PREEMPT
	select TRACING
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  This option measures the time spent in preemption off critical
@@ -166,7 +177,7 @@ config PREEMPT_TRACER
config SYSPROF_TRACER
	bool "Sysprof Tracer"
	depends on X86
	select TRACING
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  This tracer provides the trace needed by the 'Sysprof' userspace
@@ -174,44 +185,33 @@ config SYSPROF_TRACER

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select TRACING
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

config ENABLE_CONTEXT_SWITCH_TRACER
	bool "Trace process context switches"
	select TRACING
	select CONTEXT_SWITCH_TRACER
	help
	  This tracer gets called from the context switch and records
	  all switching of tasks.

config ENABLE_EVENT_TRACING
	bool "Trace various events in the kernel"
config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel
	  allowing the user to pick and choose which trace point they
	  want to trace.

	  Note, all tracers enable event tracing. This option is
	  only a convenience to enable event tracing when no other
	  tracers are selected.
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_FTRACE_SYSCALLS
	select TRACING
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

config BOOT_TRACER
	bool "Trace boot initcalls"
	select TRACING
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  This tracer helps developers to optimize boot times: it records
@@ -228,7 +228,7 @@ config BOOT_TRACER

config TRACE_BRANCH_PROFILING
	bool
	select TRACING
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
@@ -308,7 +308,7 @@ config BRANCH_TRACER
config POWER_TRACER
	bool "Trace power consumption behavior"
	depends on X86
	select TRACING
	select GENERIC_TRACER
	help
	  This tracer helps developers to analyze and optimize the kernels
	  power management decisions, specifically the C-state and P-state
@@ -342,14 +342,14 @@ config STACK_TRACER
config HW_BRANCH_TRACER
	depends on HAVE_HW_BRANCH_TRACER
	bool "Trace hw branches"
	select TRACING
	select GENERIC_TRACER
	help
	  This tracer records all branches on the system in a circular
	  buffer giving access to the last N branches for each cpu.

config KMEMTRACE
	bool "Trace SLAB allocations"
	select TRACING
	select GENERIC_TRACER
	help
	  kmemtrace provides tracing for slab allocator functions, such as
	  kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc.. Collected
@@ -369,7 +369,7 @@ config KMEMTRACE

config WORKQUEUE_TRACER
	bool "Trace workqueues"
	select TRACING
	select GENERIC_TRACER
	help
	  The workqueue tracer provides some statistical information
          about each cpu workqueue thread such as the number of the
@@ -385,7 +385,7 @@ config BLK_DEV_IO_TRACE
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select TRACING
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
@@ -446,7 +446,7 @@ config FTRACE_SELFTEST

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on TRACING
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
@@ -457,7 +457,7 @@ config FTRACE_STARTUP_TEST
config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select TRACING
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
Loading