Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 750ed158 authored by Ingo Molnar's avatar Ingo Molnar
Browse files

Merge branch 'tip/perf/core' of...

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
parents 3f7edb16 7e40798f
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -1638,8 +1638,8 @@ ftrace_failures_open(struct inode *inode, struct file *file)

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		m = file->private_data;
		iter = m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

+1 −1
Original line number Diff line number Diff line
@@ -2196,7 +2196,7 @@ int tracing_open_generic(struct inode *inode, struct file *filp)

static int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

+4 −0
Original line number Diff line number Diff line
@@ -343,6 +343,10 @@ void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);
+79 −7
Original line number Diff line number Diff line
@@ -262,6 +262,34 @@ int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
		return trace_graph_entry(trace);
}

/*
 * Record a single function "call" in the function-graph ring buffer
 * as a matched entry/return pair with zero duration: both events use
 * the same timestamp (calltime == rettime) and depth 0, so the graph
 * output shows the function as a leaf event rather than a real nested
 * call.
 */
static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	/* One timestamp shared by entry and return => zero duration. */
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

/*
 * Public wrapper used by other tracers (e.g. irqsoff) to emit a
 * function event through the graph tracer's buffer format.
 *
 * Note: @parent_ip is accepted for signature compatibility with
 * trace_function() but is deliberately ignored here — only @ip is
 * recorded as a zero-duration graph event.
 */
void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
@@ -888,12 +916,20 @@ check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;
	int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	if (flags & TRACE_GRAPH_PRINT_IRQS)
	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
@@ -926,12 +962,20 @@ static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;
	int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	if (flags & TRACE_GRAPH_PRINT_IRQS)
	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
@@ -1163,7 +1207,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
__print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
@@ -1226,7 +1270,18 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags)
static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
	return __print_graph_function_flags(iter, tracer_flags.val);
}

/*
 * External entry point for printing a graph-tracer event on behalf of
 * another tracer. Centralizes the flag fixup that callers previously
 * duplicated: in latency format show per-call durations, otherwise
 * show absolute timestamps, then delegate to the internal printer.
 */
enum print_line_t print_graph_function_flags(struct trace_iterator *iter,
					     u32 flags)
{
	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		flags |= TRACE_GRAPH_PRINT_DURATION;
	else
		flags |= TRACE_GRAPH_PRINT_ABS_TIME;

	return __print_graph_function_flags(iter, flags);
}

static enum print_line_t
@@ -1258,7 +1313,7 @@ static void print_lat_header(struct seq_file *s, u32 flags)
	seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

@@ -1299,6 +1354,23 @@ void print_graph_headers(struct seq_file *s)
	print_graph_headers_flags(s, tracer_flags.val);
}

/*
 * External entry point for printing the graph-tracer column headers.
 * Applies the same latency-format flag fixup as
 * print_graph_function_flags(), and in latency mode also emits the
 * per-trace latency header (suppressing all output when the buffers
 * are empty), before handing off to the internal header printer.
 */
void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	/* The iterator was stashed in the seq_file's private data. */
	struct trace_iterator *iter = s->private;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
		flags |= TRACE_GRAPH_PRINT_DURATION;
	} else
		flags |= TRACE_GRAPH_PRINT_ABS_TIME;

	__print_graph_headers_flags(s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
+55 −97
Original line number Diff line number Diff line
@@ -87,14 +87,22 @@ static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

@@ -106,17 +114,37 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;
		return 0;

	local_save_flags(flags);
	local_save_flags(*flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;
	if (!irqs_disabled_flags(*flags))
		return 0;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	*data = tr->data[cpu];
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
@@ -155,30 +183,16 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return 0;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	} else
		ret = 0;

	atomic_dec(&data->disabled);

	return ret;
}

@@ -187,27 +201,13 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
	if (!func_prolog_dec(tr, &data, &flags))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	}

	atomic_dec(&data->disabled);
}

@@ -229,75 +229,33 @@ static void irqsoff_trace_close(struct trace_iterator *iter)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	u32 flags = GRAPH_TRACER_FLAGS;

	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		flags |= TRACE_GRAPH_PRINT_DURATION;
	else
		flags |= TRACE_GRAPH_PRINT_ABS_TIME;

	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, flags);
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	if (is_graph()) {
		struct trace_iterator *iter = s->private;
		u32 flags = GRAPH_TRACER_FLAGS;

		if (trace_flags & TRACE_ITER_LATENCY_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return;

			print_trace_header(s, iter);
			flags |= TRACE_GRAPH_PRINT_DURATION;
		} else
			flags |= TRACE_GRAPH_PRINT_ABS_TIME;

		print_graph_headers_flags(s, flags);
	} else
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

/*
 * (Removed by this commit: this local copy in trace_irqsoff.c is
 * superseded by the shared trace_graph_function() now exported from
 * trace_functions_graph.c.)
 *
 * Emit @ip as a zero-duration graph event: a matched entry/return
 * pair sharing one timestamp (calltime == rettime) at depth 0.
 */
static void
trace_graph_function(struct trace_array *tr,
		 unsigned long ip, unsigned long flags, int pc)
{
	/* Same clock value for entry and return => zero duration. */
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (!is_graph())
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
	else {
		trace_graph_function(tr, parent_ip, flags, pc);
		trace_graph_function(tr, ip, flags, pc);
	}
}

#else
Loading