
Commit f79ec886 authored by Linus Torvalds
Pull tracing fixes from Steven Rostedt:
 "Three minor updates

   - Use the new GFP_RETRY_MAYFAIL to be more aggressive in allocating
     memory for the ring buffer without causing OOMs

   - Fix a memory leak in adding and removing instances

   - Add __rcu annotation to be able to debug RCU usage of function
     tracing a bit better"

* tag 'trace-v4.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  trace: fix the errors caused by incompatible type of RCU variables
  tracing: Fix kmemleak in instance_rmdir
  tracing/ring_buffer: Try harder to allocate
parents b0a75281 f86f4180
include/linux/ftrace.h  +3 −3
@@ -145,8 +145,8 @@ enum {
#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
-	struct ftrace_hash		*notrace_hash;
-	struct ftrace_hash		*filter_hash;
+	struct ftrace_hash __rcu	*notrace_hash;
+	struct ftrace_hash __rcu	*filter_hash;
	struct mutex			regex_lock;
};

@@ -168,7 +168,7 @@ static inline void ftrace_free_init_mem(void) { }
 */
struct ftrace_ops {
	ftrace_func_t			func;
-	struct ftrace_ops		*next;
+	struct ftrace_ops __rcu		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
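
For background (not part of the commit itself): the __rcu marker changes nothing at runtime; it only tells sparse ("make C=1") that the pointer lives in an RCU-managed address space, so a plain dereference is flagged and only the rcu_* accessors are accepted. A tiny hypothetical example of what the checker enforces, with made-up names (cfg, active_cfg, read_val):

#include <linux/rcupdate.h>

struct cfg {
	int val;
};

static struct cfg __rcu *active_cfg;

static int read_val(void)
{
	struct cfg *c;
	int v;

	/* Reading active_cfg->val directly would draw a sparse address-space warning. */
	rcu_read_lock();
	c = rcu_dereference(active_cfg);	/* accessor strips the __rcu marker */
	v = c ? c->val : 0;
	rcu_read_unlock();

	return v;
}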
include/linux/trace_events.h  +1 −1
@@ -338,7 +338,7 @@ enum {
struct trace_event_file {
	struct list_head		list;
	struct trace_event_call		*event_call;
-	struct event_filter		*filter;
+	struct event_filter __rcu	*filter;
	struct dentry			*dir;
	struct trace_array		*tr;
	struct trace_subsystem_dir	*system;
kernel/trace/ftrace.c  +27 −14
@@ -113,7 +113,7 @@ static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

-static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;

@@ -169,8 +169,11 @@ int ftrace_nr_registered_ops(void)

	mutex_lock(&ftrace_lock);

-	for (ops = ftrace_ops_list;
-	     ops != &ftrace_list_end; ops = ops->next)
+	for (ops = rcu_dereference_protected(ftrace_ops_list,
+					     lockdep_is_held(&ftrace_lock));
+	     ops != &ftrace_list_end;
+	     ops = rcu_dereference_protected(ops->next,
+					     lockdep_is_held(&ftrace_lock)))
		cnt++;

	mutex_unlock(&ftrace_lock);
@@ -275,10 +278,11 @@ static void update_ftrace_function(void)
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
-	set_function_trace_op = ftrace_ops_list;
+	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
+						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
-	if (ftrace_ops_list == &ftrace_list_end) {
+	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
@@ -286,7 +290,8 @@ static void update_ftrace_function(void)
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
-	} else if (ftrace_ops_list->next == &ftrace_list_end) {
+	} else if (rcu_dereference_protected(ftrace_ops_list->next,
+			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
@@ -348,9 +353,11 @@ int using_ftrace_ops_list_func(void)
	return ftrace_trace_function == ftrace_ops_list_func;
}

-static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
+static void add_ftrace_ops(struct ftrace_ops __rcu **list,
+			   struct ftrace_ops *ops)
{
-	ops->next = *list;
+	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
@@ -360,7 +367,8 @@ static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
	rcu_assign_pointer(*list, ops);
}

-static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
+static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
+			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

@@ -368,7 +376,10 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
-	if (*list == ops && ops->next == &ftrace_list_end) {
+	if (rcu_dereference_protected(*list,
+			lockdep_is_held(&ftrace_lock)) == ops &&
+	    rcu_dereference_protected(ops->next,
+			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}
@@ -1569,8 +1580,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
		return 0;
#endif

-	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
-	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
+	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
+	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
@@ -2840,7 +2851,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
	 * If there's no more ops registered with ftrace, run a
	 * sanity check to make sure all rec flags are cleared.
	 */
-	if (ftrace_ops_list == &ftrace_list_end) {
+	if (rcu_dereference_protected(ftrace_ops_list,
+			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		struct ftrace_page *pg;
		struct dyn_ftrace *rec;

@@ -6453,7 +6465,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
	if (ftrace_enabled) {

		/* we are starting ftrace again */
-		if (ftrace_ops_list != &ftrace_list_end)
+		if (rcu_dereference_protected(ftrace_ops_list,
+			lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
			update_ftrace_function();

		ftrace_startup_sysctl();
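
The pattern applied throughout this file is the standard one for an RCU-protected singly linked list with a single updater lock: writers publish with rcu_assign_pointer() and read the list while holding the lock via rcu_dereference_protected(..., lockdep_is_held(...)). A minimal sketch of that updater-side pattern, using hypothetical my_ops/my_list/my_lock names rather than the real ftrace symbols:

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct my_ops {
	struct my_ops __rcu	*next;	/* only touched through RCU accessors */
	void			(*func)(void);
};

static DEFINE_MUTEX(my_lock);
static struct my_ops __rcu *my_list;

/* Updater side: serialize on my_lock, publish with rcu_assign_pointer(). */
static void my_add(struct my_ops *ops)
{
	mutex_lock(&my_lock);
	rcu_assign_pointer(ops->next,
			   rcu_dereference_protected(my_list,
						     lockdep_is_held(&my_lock)));
	rcu_assign_pointer(my_list, ops);
	mutex_unlock(&my_lock);
}

/* Walking the list while holding the updater lock, as ftrace_nr_registered_ops() does. */
static int my_count(void)
{
	struct my_ops *ops;
	int cnt = 0;

	mutex_lock(&my_lock);
	for (ops = rcu_dereference_protected(my_list, lockdep_is_held(&my_lock));
	     ops;
	     ops = rcu_dereference_protected(ops->next, lockdep_is_held(&my_lock)))
		cnt++;
	mutex_unlock(&my_lock);
	return cnt;
}

Lockless readers would instead walk the list under rcu_read_lock() with rcu_dereference(); rcu_dereference_protected() is the cheap variant for code paths that already hold the lock that excludes concurrent updates.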
kernel/trace/ring_buffer.c  +5 −5
@@ -1136,12 +1136,12 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		/*
-		 * __GFP_NORETRY flag makes sure that the allocation fails
-		 * gracefully without invoking oom-killer and the system is
-		 * not destabilized.
+		 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
+		 * gracefully without invoking oom-killer and the system is not
+		 * destabilized.
		 */
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
-				    GFP_KERNEL | __GFP_NORETRY,
+				    GFP_KERNEL | __GFP_RETRY_MAYFAIL,
				    cpu_to_node(cpu));
		if (!bpage)
			goto free_pages;
@@ -1149,7 +1149,7 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu),
-					GFP_KERNEL | __GFP_NORETRY, 0);
+					GFP_KERNEL | __GFP_RETRY_MAYFAIL, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
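
The allocation change swaps __GFP_NORETRY, which gives up at the first sign of memory pressure (so large ring-buffer resizes often failed needlessly), for __GFP_RETRY_MAYFAIL, which retries reclaim harder but still returns NULL instead of triggering the OOM killer. A hedged sketch of the idiom with a hypothetical helper, not the ring-buffer code itself:

#include <linux/gfp.h>
#include <linux/slab.h>

/* Hypothetical helper: try hard to allocate, but never invoke the OOM killer. */
static void *alloc_big_buffer(size_t size, int node)
{
	/*
	 * __GFP_RETRY_MAYFAIL retries reclaim more aggressively than
	 * __GFP_NORETRY, yet still fails gracefully with NULL, so the
	 * caller must be prepared to handle allocation failure.
	 */
	return kzalloc_node(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL, node);
}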
kernel/trace/trace.c  +1 −0
@@ -7774,6 +7774,7 @@ static int instance_rmdir(const char *name)
	}
	kfree(tr->topts);

+	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

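The kmemleak fix simply pairs the teardown with the setup: the instance's tracing_cpumask is allocated with alloc_cpumask_var() when the instance is created, so instance_rmdir() must release it before freeing the trace_array. The general alloc/free pairing, sketched with a hypothetical instance structure rather than the real trace_array code:

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/string.h>

struct instance {
	cpumask_var_t	mask;
	char		*name;
};

static struct instance *instance_create(const char *name)
{
	struct instance *inst = kzalloc(sizeof(*inst), GFP_KERNEL);

	if (!inst)
		return NULL;
	if (!alloc_cpumask_var(&inst->mask, GFP_KERNEL)) {
		kfree(inst);
		return NULL;
	}
	inst->name = kstrdup(name, GFP_KERNEL);
	return inst;
}

static void instance_destroy(struct instance *inst)
{
	free_cpumask_var(inst->mask);	/* the step the fix adds to instance_rmdir() */
	kfree(inst->name);
	kfree(inst);
}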