
Commit 3928a8a2 authored by Steven Rostedt, committed by Ingo Molnar

ftrace: make work with new ring buffer



This patch ports ftrace over to the new ring buffer.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ed56829c
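The heart of the port shows up in the trace_boot.c hunk below: instead of taking data->lock and carving an entry out of ftrace's own per-CPU page lists, a tracer now reserves an event from the ring buffer, fills it in, and commits it. A minimal sketch of that producer pattern against the new API (the payload struct and demo_* names are illustrative, not the actual ftrace types):

#include <linux/ring_buffer.h>

/* Hypothetical payload; the real tracers record struct trace_entry. */
struct demo_entry {
	int	pid;
	u64	value;
};

static void demo_record(struct ring_buffer *buffer, int pid, u64 value)
{
	struct ring_buffer_event *event;
	struct demo_entry *entry;
	unsigned long irq_flags;

	/* Reserve space for one event; locking is internal to the buffer. */
	event = ring_buffer_lock_reserve(buffer, sizeof(*entry), &irq_flags);
	if (!event)
		return;		/* buffer disabled or no space */

	/* Fill in the reserved slot. */
	entry = ring_buffer_event_data(event);
	entry->pid = pid;
	entry->value = value;

	/* Commit makes the event visible to readers. */
	ring_buffer_unlock_commit(buffer, event, irq_flags);
}

Note the failure path: the reservation can fail, so every call site needs a bail-out, which is exactly why trace_boot() below grows an out: label.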
kernel/trace/trace.c  +234 −698  (diff collapsed: preview size limit exceeded)

kernel/trace/trace.h  +5 −17
@@ -5,6 +5,7 @@
 #include <asm/atomic.h>
 #include <linux/sched.h>
 #include <linux/clocksource.h>
+#include <linux/ring_buffer.h>
 #include <linux/mmiotrace.h>
 #include <linux/ftrace.h>

@@ -102,7 +103,6 @@ struct trace_field {
 	char			flags;
 	char			preempt_count;
 	int			pid;
-	cycle_t			t;
 	union {
 		struct ftrace_entry		fn;
 		struct ctx_switch_entry		ctx;
@@ -139,16 +139,9 @@ struct trace_entry {
  * the trace, etc.)
  */
 struct trace_array_cpu {
-	struct list_head	trace_pages;
 	atomic_t		disabled;
-	raw_spinlock_t		lock;
-	struct lock_class_key	lock_key;

 	/* these fields get copied into max-trace: */
-	unsigned		trace_head_idx;
-	unsigned		trace_tail_idx;
-	void			*trace_head; /* producer */
-	void			*trace_tail; /* consumer */
 	unsigned long		trace_idx;
 	unsigned long		overrun;
 	unsigned long		saved_latency;
@@ -172,6 +165,7 @@ struct trace_iterator;
  * They have on/off state as well:
  */
 struct trace_array {
+	struct ring_buffer	*buffer;
 	unsigned long		entries;
 	long			ctrl;
 	int			cpu;
@@ -219,27 +213,21 @@ struct trace_iterator {
 	struct trace_array	*tr;
 	struct tracer		*trace;
 	void			*private;
-	long			last_overrun[NR_CPUS];
-	long			overrun[NR_CPUS];
+	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

 	/* The below is zeroed out in pipe_read */
 	struct trace_seq	seq;
 	struct trace_entry	*ent;
 	int			cpu;
-
-	struct trace_entry	*prev_ent;
-	int			prev_cpu;
+	u64			ts;

 	unsigned long		iter_flags;
 	loff_t			pos;
-	unsigned long		next_idx[NR_CPUS];
-	struct list_head	*next_page[NR_CPUS];
-	unsigned		next_page_idx[NR_CPUS];
 	long			idx;
 };

 void trace_wake_up(void);
-void tracing_reset(struct trace_array_cpu *data);
+void tracing_reset(struct trace_array *tr, int cpu);
 int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *tracing_init_dentry(void);
 void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
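The tracing_reset() signature change above is a direct consequence of the buffer move: resetting becomes a per-CPU operation on tr->buffer rather than on ftrace's own trace_array_cpu bookkeeping. The new body lives in the collapsed trace.c diff, but presumably it reduces to little more than a delegation along these lines (a sketch, not the verbatim patch):

void tracing_reset(struct trace_array *tr, int cpu)
{
	/* The ring buffer owns the per-CPU pages now; just reset them. */
	ring_buffer_reset_cpu(tr->buffer, cpu);
}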
kernel/trace/trace_boot.c  +9 −7
@@ -34,7 +34,7 @@ static void boot_trace_init(struct trace_array *tr)
 	trace_boot_enabled = 0;

 	for_each_cpu_mask(cpu, cpu_possible_map)
-		tracing_reset(tr->data[cpu]);
+		tracing_reset(tr, cpu);
 }

 static void boot_trace_ctrl_update(struct trace_array *tr)
@@ -74,6 +74,7 @@ struct tracer boot_tracer __read_mostly =

 void trace_boot(struct boot_trace *it)
 {
+	struct ring_buffer_event *event;
 	struct trace_entry *entry;
 	struct trace_array_cpu *data;
 	unsigned long irq_flags;
@@ -85,17 +86,18 @@ void trace_boot(struct boot_trace *it)
 	preempt_disable();
 	data = tr->data[smp_processor_id()];

-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&data->lock);
-
-	entry = tracing_get_trace_entry(tr, data);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		goto out;
+	entry	= ring_buffer_event_data(event);
 	tracing_generic_entry_update(entry, 0);
 	entry->type = TRACE_BOOT;
 	entry->field.initcall = *it;
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

-	__raw_spin_unlock(&data->lock);
-	raw_local_irq_restore(irq_flags);
 	trace_wake_up();

+ out:
 	preempt_enable();
 }
kernel/trace/trace_functions.c  +1 −1
@@ -23,7 +23,7 @@ static void function_reset(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);

 	for_each_online_cpu(cpu)
-		tracing_reset(tr->data[cpu]);
+		tracing_reset(tr, cpu);
 }

 static void start_function_trace(struct trace_array *tr)
kernel/trace/trace_irqsoff.c  +3 −3
@@ -173,7 +173,7 @@ out_unlock:
 out:
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
-	tracing_reset(data);
+	tracing_reset(tr, cpu);
 	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
 }

@@ -203,7 +203,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
 	data->critical_start = parent_ip ? : ip;
-	tracing_reset(data);
+	tracing_reset(tr, cpu);

 	local_save_flags(flags);

@@ -234,7 +234,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)

 	data = tr->data[cpu];

-	if (unlikely(!data) || unlikely(!head_page(data)) ||
+	if (unlikely(!data) ||
 	    !data->critical_start || atomic_read(&data->disabled))
 		return;

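The dropped head_page() check in stop_critical_timing() above is part of the same shift: tracers no longer track head/tail pages themselves, so reading goes through the per-CPU ring_buffer_iter handles added to struct trace_iterator earlier in this commit. A sketch of how a reader might drain one CPU with the iterator API (assuming the read_start/read/read_finish calls from the new ring buffer; the printout and demo_* name are illustrative):

static void demo_dump_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	/* Begin a read pass over one CPU's buffer. */
	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	/* ring_buffer_read() returns the next event and advances. */
	while ((event = ring_buffer_read(iter, &ts)))
		printk(KERN_INFO "event @ %llu, len %u\n",
		       (unsigned long long)ts,
		       ring_buffer_event_length(event));

	ring_buffer_read_finish(iter);
}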