
Commit 79b17ea7 authored by Linus Torvalds
Pull tracing updates from Steven Rostedt:
 "This release has no new tracing features, just clean ups, minor fixes
  and small optimizations"

* tag 'trace-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (25 commits)
  tracing: Remove outdated ring buffer comment
  tracing/probes: Fix a warning message to show correct maximum length
  tracing: Fix return value check in trace_benchmark_reg()
  tracing: Use modern function declaration
  jump_label: Reduce the size of struct static_key
  tracing/probe: Show subsystem name in messages
  tracing/hwlat: Update old comment about migration
  timers: Make flags output in the timer_start tracepoint useful
  tracing: Have traceprobe_probes_write() not access userspace unnecessarily
  tracing: Have COMM event filter key be treated as a string
  ftrace: Have set_graph_function handle multiple functions in one write
  ftrace: Do not hold references of ftrace_graph_{notrace_}hash out of graph_lock
  tracing: Reset parser->buffer to allow multiple "puts"
  ftrace: Have set_graph_functions handle write with RDWR
  ftrace: Reset fgd->hash in ftrace_graph_write()
  ftrace: Replace (void *)1 with a meaningful macro name FTRACE_GRAPH_EMPTY
  ftrace: Create a slight optimization on searching the ftrace_hash
  tracing: Add ftrace_hash_key() helper function
  ftrace: Convert graph filter to use hash tables
  ftrace: Expose ftrace_hash_empty and ftrace_lookup_ip
  ...
parents e5d56efc 67d04bb2
Documentation/static-keys.txt  +3 −1
@@ -155,7 +155,9 @@ or:
 
 There are a few functions and macros that architectures must implement in order
 to take advantage of this optimization. If there is no architecture support, we
-simply fall back to a traditional, load, test, and jump sequence.
+simply fall back to a traditional, load, test, and jump sequence. Also, the
+struct jump_entry table must be at least 4-byte aligned because the
+static_key->entry field makes use of the two least significant bits.
 
 * select HAVE_ARCH_JUMP_LABEL, see: arch/x86/Kconfig
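The paragraph added here documents a tagged-pointer trick: because every jump_entry is at least 4-byte aligned, the two low bits of the static_key entry pointer are always zero and can carry state instead. A standalone sketch of the idea (the names and payload are illustrative, not the kernel's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical 4-byte-aligned record, standing in for struct jump_entry. */
struct entry { int payload; } __attribute__((aligned(4)));

#define TAG_MASK 3UL	/* two low bits are free when entries are 4-byte aligned */

/* Pack a pointer and a 2-bit tag into one word. */
static uintptr_t tag_ptr(struct entry *e, unsigned long tag)
{
	assert(((uintptr_t)e & TAG_MASK) == 0);	/* alignment guarantees this */
	return (uintptr_t)e | (tag & TAG_MASK);
}

static struct entry *untag_ptr(uintptr_t v) { return (struct entry *)(v & ~TAG_MASK); }
static unsigned long get_tag(uintptr_t v)   { return v & TAG_MASK; }

int main(void)
{
	static struct entry e = { .payload = 42 };
	uintptr_t word = tag_ptr(&e, 1);	/* e.g. "key initially true" */

	printf("payload=%d tag=%lu\n", untag_ptr(word)->payload, get_tag(word));
	return 0;
}

If the table were only 2-byte aligned, bit 1 of a real pointer could be nonzero and the tag would corrupt the address, which is why the documentation now spells out the 4-byte requirement.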


include/linux/compiler.h  +17 −10
@@ -105,29 +105,36 @@ struct ftrace_branch_data {
 	};
 };
 
+struct ftrace_likely_data {
+	struct ftrace_branch_data	data;
+	unsigned long			constant;
+};
+
 /*
  * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
  * to disable branch tracing on a per file basis.
  */
 #if defined(CONFIG_TRACE_BRANCH_PROFILING) \
     && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
-void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+			  int expect, int is_constant);
 
 #define likely_notrace(x)	__builtin_expect(!!(x), 1)
 #define unlikely_notrace(x)	__builtin_expect(!!(x), 0)
 
-#define __branch_check__(x, expect) ({					\
+#define __branch_check__(x, expect, is_constant) ({			\
 			int ______r;					\
-			static struct ftrace_branch_data		\
+			static struct ftrace_likely_data		\
 				__attribute__((__aligned__(4)))		\
 				__attribute__((section("_ftrace_annotated_branch"))) \
 				______f = {				\
-				.func = __func__,			\
-				.file = __FILE__,			\
-				.line = __LINE__,			\
+				.data.func = __func__,			\
+				.data.file = __FILE__,			\
+				.data.line = __LINE__,			\
 			};						\
-			______r = likely_notrace(x);			\
-			ftrace_likely_update(&______f, ______r, expect); \
+			______r = __builtin_expect(!!(x), expect);	\
+			ftrace_likely_update(&______f, ______r,		\
+					     expect, is_constant);	\
 			______r;					\
 		})
 
@@ -137,10 +144,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
  * written by Daniel Walker.
  */
 # ifndef likely
-#  define likely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
+#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
 # endif
 # ifndef unlikely
-#  define unlikely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
+#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
 # endif
 
 #ifdef CONFIG_PROFILE_ALL_BRANCHES
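The key behavioral change: previously a compile-time-constant condition bypassed profiling entirely (the __builtin_constant_p(x) ? !!(x) : ... ternary), so always-true and always-false branches never appeared in the statistics; now constant-ness is recorded through the new is_constant argument instead. A rough userspace analogue of the instrumentation (simplified names; no section placement, so records are not aggregated for reporting the way ftrace does):

#include <stdio.h>

/* Simplified stand-ins for the kernel's per-call-site records. */
struct branch_data { const char *func, *file; int line; long correct, incorrect; };
struct likely_data { struct branch_data data; unsigned long constant; };

/* Analogue of ftrace_likely_update(): tally the outcome, and count
 * compile-time-constant evaluations separately instead of skipping them. */
static void likely_update(struct likely_data *f, int val, int expect, int is_constant)
{
	if (is_constant)
		f->constant++;
	if (val == expect)
		f->data.correct++;
	else
		f->data.incorrect++;
}

/* Analogue of __branch_check__(): one static record per call site. */
#define branch_check(x, expect) ({					\
	static struct likely_data ____f = {				\
		.data.func = __func__,					\
		.data.file = __FILE__,					\
		.data.line = __LINE__,					\
	};								\
	int ____r = __builtin_expect(!!(x), expect);			\
	likely_update(&____f, ____r, expect, __builtin_constant_p(x));	\
	____r;								\
})

#define my_likely(x)	branch_check(x, 1)
#define my_unlikely(x)	branch_check(x, 0)

int main(void)
{
	for (int i = 0; i < 10; i++)
		if (my_unlikely(i == 9))
			puts("hit the unlikely case");
	if (my_likely(1))	/* constant condition: now counted, flagged via is_constant */
		puts("constant branch");
	return 0;
}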
include/linux/jump_label.h  +15 −8
@@ -89,11 +89,17 @@ extern bool static_key_initialized;
 
 struct static_key {
 	atomic_t enabled;
-/* Set lsb bit to 1 if branch is default true, 0 ot */
-	struct jump_entry *entries;
-#ifdef CONFIG_MODULES
-	struct static_key_mod *next;
-#endif
+/*
+ * bit 0 => 1 if key is initially true
+ *	    0 if initially false
+ * bit 1 => 1 if points to struct static_key_mod
+ *	    0 if points to struct jump_entry
+ */
+	union {
+		unsigned long type;
+		struct jump_entry *entries;
+		struct static_key_mod *next;
+	};
 };
 
 #else
@@ -120,7 +126,8 @@ struct module;
 
 #define JUMP_TYPE_FALSE		0UL
 #define JUMP_TYPE_TRUE		1UL
-#define JUMP_TYPE_MASK	1UL
+#define JUMP_TYPE_LINKED	2UL
+#define JUMP_TYPE_MASK		3UL
 
 static __always_inline bool static_key_false(struct static_key *key)
 {
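After this change, one word does triple duty: bit 0 records the key's initial value, bit 1 records whether the pointer portion refers to a jump_entry table or to a static_key_mod chain, and the remaining bits are the pointer itself. A sketch of the decoding implied by the JUMP_TYPE_* values (the helper names here are invented, not the kernel's):

#include <stdbool.h>

/* Illustrative decoding of the tagged static_key word; only the
 * JUMP_TYPE_* values come from the header above. */
#define JUMP_TYPE_FALSE		0UL
#define JUMP_TYPE_TRUE		1UL
#define JUMP_TYPE_LINKED	2UL
#define JUMP_TYPE_MASK		3UL

static inline bool key_initially_true(unsigned long type)
{
	return type & JUMP_TYPE_TRUE;		/* bit 0: initial key value */
}

static inline bool key_is_linked(unsigned long type)
{
	return type & JUMP_TYPE_LINKED;		/* bit 1: static_key_mod chain vs. entries */
}

static inline void *key_pointer(unsigned long type)
{
	return (void *)(type & ~JUMP_TYPE_MASK);	/* clear both tag bits */
}

Folding the old CONFIG_MODULES pointer into the same word as the entries pointer and the tag bits is what shrinks the structure, at the cost of masking on every access.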
include/linux/timer.h  +2 −0
@@ -61,6 +61,8 @@ struct timer_list {
 #define TIMER_ARRAYSHIFT	22
 #define TIMER_ARRAYMASK		0xFFC00000
 
+#define TIMER_TRACE_FLAGMASK	(TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE)
+
 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
 		.entry = { .next = TIMER_ENTRY_STATIC },	\
 		.function = (_function),			\
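TIMER_TRACE_FLAGMASK isolates the four flag bits from timer->flags, which also packs the CPU number in the low bits (TIMER_CPUMASK) and the timer-wheel array index in the bits above TIMER_ARRAYSHIFT. A minimal decode of one hypothetical flags word, assuming the v4.11-era constant values:

#include <stdio.h>

/* Constants as in include/linux/timer.h around v4.11. */
#define TIMER_CPUMASK		0x0003FFFF
#define TIMER_MIGRATING		0x00040000
#define TIMER_DEFERRABLE	0x00080000
#define TIMER_PINNED		0x00100000
#define TIMER_IRQSAFE		0x00200000
#define TIMER_ARRAYSHIFT	22
#define TIMER_TRACE_FLAGMASK	(TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE)

int main(void)
{
	/* Hypothetical flags word: CPU 2, deferrable + pinned, wheel index 30. */
	unsigned int flags = 2 | TIMER_DEFERRABLE | TIMER_PINNED | (30u << TIMER_ARRAYSHIFT);

	printf("cpu=%u idx=%u flag-bits=0x%x\n",
	       flags & TIMER_CPUMASK,		/* CPU the timer is queued on */
	       flags >> TIMER_ARRAYSHIFT,	/* timer-wheel array index */
	       flags & TIMER_TRACE_FLAGMASK);	/* just the four trace flags */
	return 0;
}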
include/trace/events/timer.h  +12 −2
@@ -36,6 +36,13 @@ DEFINE_EVENT(timer_class, timer_init,
 	TP_ARGS(timer)
 );
 
+#define decode_timer_flags(flags)			\
+	__print_flags(flags, "|",			\
+		{  TIMER_MIGRATING,	"M" },		\
+		{  TIMER_DEFERRABLE,	"D" },		\
+		{  TIMER_PINNED,	"P" },		\
+		{  TIMER_IRQSAFE,	"I" })
+
 /**
  * timer_start - called when the timer is started
  * @timer:	pointer to struct timer_list
@@ -65,9 +72,12 @@ TRACE_EVENT(timer_start,
 		__entry->flags		= flags;
 	),
 
-	TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] flags=0x%08x",
+	TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s",
 		  __entry->timer, __entry->function, __entry->expires,
-		  (long)__entry->expires - __entry->now, __entry->flags)
+		  (long)__entry->expires - __entry->now,
+		  __entry->flags & TIMER_CPUMASK,
+		  __entry->flags >> TIMER_ARRAYSHIFT,
+		  decode_timer_flags(__entry->flags & TIMER_TRACE_FLAGMASK))
 );
 
 /**
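decode_timer_flags() feeds __print_flags(), which emits the names of all set bits joined by the given delimiter. A userspace sketch of that behavior (a standalone re-implementation; tracing's real __print_flags formats into the trace buffer, not stdout):

#include <stdio.h>

/* Sketch of what __print_flags(flags, "|", {bit, name}...) produces. */
struct flag_name { unsigned int bit; const char *name; };

static void print_timer_flags(unsigned int flags)
{
	static const struct flag_name names[] = {
		{ 0x00040000, "M" },	/* TIMER_MIGRATING */
		{ 0x00080000, "D" },	/* TIMER_DEFERRABLE */
		{ 0x00100000, "P" },	/* TIMER_PINNED */
		{ 0x00200000, "I" },	/* TIMER_IRQSAFE */
	};
	const char *sep = "";

	for (unsigned int i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		if (flags & names[i].bit) {
			printf("%s%s", sep, names[i].name);
			sep = "|";
		}
	printf("\n");
}

int main(void)
{
	print_timer_flags(0x00080000 | 0x00100000);	/* prints "D|P" */
	return 0;
}

Given the TP_printk format above, a deferrable, pinned timer would thus show up as flags=D|P rather than a raw hex word, with cpu= and idx= unpacked from the same flags field.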