Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c4f400e8 authored by Ingo Molnar's avatar Ingo Molnar
Browse files

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core
parents cb04ff9a 68179686
Loading
Loading
Loading
Loading
+6 −2
Original line number Diff line number Diff line
@@ -491,8 +491,12 @@ static inline void __ftrace_enabled_restore(int enabled)
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+11 −33
Original line number Diff line number Diff line
@@ -2469,57 +2469,35 @@ static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
	if (iter) {
		iter->pg = ftrace_pages_start;
		iter->ops = &global_ops;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
	return iter ? 0 : -ENOMEM;
}

/*
 * Open the "enabled_functions" file.
 *
 * Same pattern as ftrace_avail_open(), but sets FTRACE_ITER_ENABLED so
 * the seq iterator only shows functions currently enabled for tracing.
 * __seq_open_private() allocates the iterator and seq_file together,
 * removing the old kzalloc()/seq_open()/kfree() error juggling.
 *
 * Returns 0 on success, -ENODEV if ftrace is disabled, -ENOMEM if the
 * iterator could not be allocated.
 */
static int
ftrace_enabled_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
	if (iter) {
		iter->pg = ftrace_pages_start;
		iter->flags = FTRACE_ITER_ENABLED;
		iter->ops = &global_ops;
	}

	return iter ? 0 : -ENOMEM;
}

static void ftrace_filter_reset(struct ftrace_hash *hash)
+7 −67
Original line number Diff line number Diff line
@@ -87,18 +87,6 @@ static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

/*
 * Bump this CPU's ftrace_cpu_disabled counter, with preemption disabled
 * so the increment and the section it protects stay on the same CPU.
 * Callers in this file use it to keep ftrace from tracing into the ring
 * buffers while they are being read or reset (see the comments at the
 * call sites).  Pairs with ftrace_enable_cpu().
 */
static inline void ftrace_disable_cpu(void)
{
	preempt_disable();
	__this_cpu_inc(ftrace_cpu_disabled);
}

/*
 * Undo ftrace_disable_cpu(): drop this CPU's ftrace_cpu_disabled
 * counter first, then re-enable preemption.  Must be called on the
 * same CPU as the matching ftrace_disable_cpu() (guaranteed by the
 * preempt-disabled section between them).
 */
static inline void ftrace_enable_cpu(void)
{
	__this_cpu_dec(ftrace_cpu_disabled);
	preempt_enable();
}

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
@@ -748,8 +736,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)

	arch_spin_lock(&ftrace_max_lock);

	ftrace_disable_cpu();

	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	if (ret == -EBUSY) {
@@ -763,8 +749,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
			"Failed to swap buffers due to commit in progress\n");
	}

	ftrace_enable_cpu();

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
@@ -916,13 +900,6 @@ void unregister_tracer(struct tracer *type)
	mutex_unlock(&trace_types_lock);
}

/*
 * Reset one CPU's ring buffer, bracketed by ftrace_disable_cpu()/
 * ftrace_enable_cpu() so ftrace does not trace into the buffer while
 * it is being cleared.
 */
static void __tracing_reset(struct ring_buffer *buffer, int cpu)
{
	ftrace_disable_cpu();
	ring_buffer_reset_cpu(buffer, cpu);
	ftrace_enable_cpu();
}

void tracing_reset(struct trace_array *tr, int cpu)
{
	struct ring_buffer *buffer = tr->buffer;
@@ -931,7 +908,7 @@ void tracing_reset(struct trace_array *tr, int cpu)

	/* Make sure all commits have finished */
	synchronize_sched();
	__tracing_reset(buffer, cpu);
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
@@ -949,7 +926,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		__tracing_reset(buffer, cpu);
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
@@ -1733,14 +1710,9 @@ EXPORT_SYMBOL_GPL(trace_vprintk);

/*
 * Advance the trace iterator by one entry: bump the logical index and,
 * if this CPU has a ring-buffer iterator, step it forward as well.
 * The ftrace_disable_cpu()/ftrace_enable_cpu() bracketing is gone:
 * the ring buffer now protects itself against ftrace recursion, so the
 * per-cpu disable dance is no longer needed here.
 */
static void trace_iterator_increment(struct trace_iterator *iter)
{
	iter->idx++;
	if (iter->buffer_iter[iter->cpu])
		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
}

static struct trace_entry *
@@ -1750,17 +1722,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];

	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
					 lost_events);

	ftrace_enable_cpu();

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
@@ -1850,11 +1817,8 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter)

/*
 * Consume (destructively read) the next entry from this CPU's ring
 * buffer, recording the timestamp and any lost-event count in the
 * iterator.  The former ftrace_disable_cpu()/ftrace_enable_cpu() pair
 * around the call was removed along with those helpers; the ring
 * buffer handles its own recursion protection.
 */
static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
@@ -1943,16 +1907,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
		iter->cpu = 0;
		iter->idx = -1;

		ftrace_disable_cpu();

		if (cpu_file == TRACE_PIPE_ALL_CPU) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		ftrace_enable_cpu();

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;
@@ -2413,15 +2373,13 @@ static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file)
{
	long cpu_file = (long) inode->i_private;
	void *fail_ret = ERR_PTR(-ENOMEM);
	struct trace_iterator *iter;
	struct seq_file *m;
	int cpu, ret;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

@@ -2478,32 +2436,15 @@ __tracing_open(struct inode *inode, struct file *file)
		tracing_iter_reset(iter, cpu);
	}

	ret = seq_open(file, &tracer_seq_ops);
	if (ret < 0) {
		fail_ret = ERR_PTR(ret);
		goto fail_buffer;
	}

	m = file->private_data;
	m->private = iter;

	mutex_unlock(&trace_types_lock);

	return iter;

 fail_buffer:
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}
	free_cpumask_var(iter->started);
	tracing_start();
 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter);

	return fail_ret;
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

int tracing_open_generic(struct inode *inode, struct file *filp)
@@ -2539,11 +2480,10 @@ static int tracing_release(struct inode *inode, struct file *file)
	tracing_start();
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter);
	seq_release_private(inode, file);
	return 0;
}