Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 89b2f978 authored by Steven Rostedt, committed by Thomas Gleixner
Browse files

ftrace: fix updates to max trace



This patch fixes some bugs in the updating of the max trace that
were caused by implementing the new buffering.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 18cef379
Loading
Loading
Loading
Loading
+5 −1
Original line number Diff line number Diff line
@@ -153,6 +153,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
		memcpy(max_tr.data[i], data, sizeof(*data));
		data->trace = save_trace;
		data->trace_pages = save_pages;
		tracing_reset(data);
	}

	__update_max_tr(tr, tsk, cpu);
@@ -183,6 +184,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
	memcpy(max_tr.data[cpu], data, sizeof(*data));
	data->trace = save_trace;
	data->trace_pages = save_pages;
	tracing_reset(data);

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
@@ -877,6 +879,8 @@ print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
			   entry->ctx.next_prio,
			   comm);
		break;
	default:
		seq_printf(m, "Unknown type %d\n", entry->type);
	}
}

@@ -1625,7 +1629,6 @@ __init static int tracer_alloc_buffers(void)
	 * round up a bit.
	 */
	global_trace.entries = ENTRIES_PER_PAGE;
	max_tr.entries = global_trace.entries;
	pages++;

	while (global_trace.entries < trace_nr_entries) {
@@ -1633,6 +1636,7 @@ __init static int tracer_alloc_buffers(void)
			break;
		pages++;
	}
	max_tr.entries = global_trace.entries;

	pr_info("tracer: %d pages allocated for %ld",
		pages, trace_nr_entries);
+15 −12
Original line number Diff line number Diff line
@@ -23,6 +23,8 @@ static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
@@ -126,7 +128,7 @@ check_critical_timing(struct trace_array *tr,
		      int cpu)
{
	unsigned long latency, t0, t1;
	cycle_t T0, T1, T2, delta;
	cycle_t T0, T1, delta;
	unsigned long flags;

	/*
@@ -142,20 +144,18 @@ check_critical_timing(struct trace_array *tr,
	if (!report_latency(delta))
		goto out;

	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
	/*
	 * Update the timestamp, because the trace entry above
	 * might change it (it can only get larger so the latency
	 * is fair to be reported):
	 */
	T2 = now(cpu);
	spin_lock(&max_trace_lock);

	delta = T2-T0;
	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);

	latency = nsecs_to_usecs(delta);

	if (data->critical_sequence != max_sequence)
		goto out;
		goto out_unlock;

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
@@ -189,6 +189,9 @@ check_critical_timing(struct trace_array *tr,

	max_sequence++;

out_unlock:
	spin_unlock(&max_trace_lock);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = now(cpu);
@@ -366,14 +369,14 @@ void notrace trace_preempt_off(unsigned long a0, unsigned long a1)

/*
 * start_irqsoff_tracer - enable the irqsoff latency tracer.
 * @tr: trace array for this tracer (unused here; kept for the
 *      common tracer start/stop interface).
 *
 * Registers the ftrace callback first and only then sets
 * tracer_enabled, so the flag never reports the tracer as active
 * before the hook is actually installed.
 * NOTE(review): the scraped diff showed both the old and the new
 * placement of the tracer_enabled assignment; this is the
 * post-patch ordering from commit 89b2f978.
 */
static void start_irqsoff_tracer(struct trace_array *tr)
{
	register_ftrace_function(&trace_ops);
	tracer_enabled = 1;
}

/*
 * stop_irqsoff_tracer - disable the irqsoff latency tracer.
 * @tr: trace array for this tracer (unused here; kept for the
 *      common tracer start/stop interface).
 *
 * Clears tracer_enabled before unregistering the ftrace callback,
 * the mirror image of start_irqsoff_tracer(): the flag is dropped
 * first so no window exists where the tracer claims to be enabled
 * while its hook is being torn down.
 * NOTE(review): the scraped diff showed both the old and the new
 * placement of the tracer_enabled assignment; this is the
 * post-patch ordering from commit 89b2f978.
 */
static void stop_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}

static void __irqsoff_tracer_init(struct trace_array *tr)