
Commit 3189cdb3 authored by Steven Rostedt, committed by Steven Rostedt

tracing: protect trace_printk from recursion



trace_printk can be called from any context, including NMIs.
If this happens, then we must test for recursion before
grabbing any spinlocks.

This patch prevents trace_printk from being called recursively.

[ Impact: prevent hard lockup in lockdep event tracer ]

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 261842b7
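
To illustrate the idea, here is a minimal userspace sketch of the same guard pattern. It uses C11 atomics and a single global counter instead of the kernel's per-cpu atomic_t, and the names guard_enter(), guard_exit() and do_trace_work() are invented for this example only; they do not appear in the patch.

/*
 * Minimal sketch of the recursion guard added by this patch.
 * In the kernel, data->disabled is a per-cpu atomic_t and preemption
 * is disabled around the whole sequence; here a single global counter
 * stands in for it.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int disabled;	/* stands in for data->disabled */

/* Analogue of atomic_inc_return(): true only for the first, non-recursive entry. */
static int guard_enter(void)
{
	return atomic_fetch_add(&disabled, 1) + 1 == 1;
}

/* Analogue of atomic_dec_return(). */
static void guard_exit(void)
{
	atomic_fetch_sub(&disabled, 1);
}

static void do_trace_work(int depth)
{
	if (!guard_enter())
		goto out;	/* recursive entry: bail before touching any locks */

	printf("tracing at depth %d\n", depth);
	if (depth < 3)
		do_trace_work(depth + 1);	/* simulate trace_printk recursing into itself */
out:
	guard_exit();	/* decrement unconditionally, as the patch does at its out: label */
}

int main(void)
{
	do_trace_work(0);	/* prints only "tracing at depth 0" */
	return 0;
}

The difference from the old atomic_read() check is that the increment both marks and tests the critical section in one atomic step, so a reentrant call (for example from an NMI, or from lockdep tracing the locks that trace_printk itself takes) sees a count other than 1 and backs out before grabbing any spinlocks.
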
+8 −2
@@ -1259,6 +1259,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	struct trace_array_cpu *data;
 	struct bprint_entry *entry;
 	unsigned long flags;
+	int disable;
 	int resched;
 	int cpu, len = 0, size, pc;
 
@@ -1273,7 +1274,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 
-	if (unlikely(atomic_read(&data->disabled)))
+	disable = atomic_inc_return(&data->disabled);
+	if (unlikely(disable != 1))
 		goto out;
 
 	/* Lockdep uses trace_printk for lock tracing */
@@ -1301,6 +1303,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	local_irq_restore(flags);
 
 out:
+	atomic_dec_return(&data->disabled);
 	ftrace_preempt_enable(resched);
 	unpause_graph_tracing();
 
@@ -1320,6 +1323,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	int cpu, len = 0, size, pc;
 	struct print_entry *entry;
 	unsigned long irq_flags;
+	int disable;
 
 	if (tracing_disabled || tracing_selftest_running)
 		return 0;
@@ -1329,7 +1333,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 
-	if (unlikely(atomic_read(&data->disabled)))
+	disable = atomic_inc_return(&data->disabled);
+	if (unlikely(disable != 1))
 		goto out;
 
 	pause_graph_tracing();
@@ -1357,6 +1362,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	raw_local_irq_restore(irq_flags);
 	unpause_graph_tracing();
  out:
+	atomic_dec_return(&data->disabled);
 	preempt_enable_notrace();
 
 	return len;