
Commit 3ea2e6d7 authored by Steven Rostedt, committed by Ingo Molnar

ftrace: make some tracers reentrant



Now that the ring buffer is reentrant, some of the ftrace tracers
(sched_switch, debugging traces) can also be made reentrant.

Note: Never make the function tracer reentrant, as that can cause
  recursion problems all over the kernel. The function tracer
  must disable reentrancy.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent bf41a158
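
The diff below repeats one pattern in four call sites: the per-CPU disabled counter is no longer bumped as a recursion lock around the buffer write; it is only read as an on/off switch, since the ring buffer itself now handles reentrancy. A minimal userspace sketch of the two patterns, using C11 atomics; struct cpu_trace_data and record_event are illustrative stand-ins, not the kernel's ftrace types:

#include <stdatomic.h>

/* Illustrative stand-ins; not the kernel's trace_array_cpu or ftrace API. */
struct cpu_trace_data {
	atomic_long disabled;	/* plays the role of trace_array_cpu->disabled */
};

static void record_event(struct cpu_trace_data *data)
{
	/* stand-in for writing an entry into the (now reentrant) ring buffer */
}

/* Old pattern: the counter doubles as a per-CPU recursion lock, so a
 * nested event (e.g. from an interrupt) sees disabled != 1 and bails. */
static void trace_event_old(struct cpu_trace_data *data)
{
	long disabled = atomic_fetch_add(&data->disabled, 1) + 1;

	if (disabled == 1)
		record_event(data);

	atomic_fetch_sub(&data->disabled, 1);
}

/* New pattern: reentrancy is the ring buffer's problem, so the counter
 * is only consulted as a tracing on/off switch and never modified here. */
static void trace_event_new(struct cpu_trace_data *data)
{
	if (!atomic_load(&data->disabled))
		record_event(data);
}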
Loading
Loading
Loading
Loading
kernel/trace/trace.c +2 −8
@@ -839,7 +839,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
-	long disabled;
 	int cpu;
 	int pc;
 
@@ -850,12 +849,10 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
+	if (likely(!atomic_read(&data->disabled)))
 		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
 
-	atomic_dec(&data->disabled);
 	preempt_enable_notrace();
 }
 
@@ -2961,7 +2958,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	struct trace_array_cpu *data;
 	struct print_entry *entry;
 	unsigned long flags, irq_flags;
-	long disabled;
 	int cpu, len = 0, size, pc;
 
 	if (!tr->ctrl || tracing_disabled)
@@ -2971,9 +2967,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (unlikely(disabled != 1))
+	if (unlikely(atomic_read(&data->disabled)))
 		goto out;
 
 	spin_lock_irqsave(&trace_buf_lock, flags);
@@ -2999,7 +2994,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	spin_unlock_irqrestore(&trace_buf_lock, flags);
 
  out:
-	atomic_dec(&data->disabled);
 	preempt_enable_notrace();
 
 	return len;
kernel/trace/trace_sched_switch.c +2 −8
@@ -24,7 +24,6 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 {
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
 	int cpu;
 	int pc;
 
@@ -41,12 +40,10 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = ctx_trace->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
+	if (likely(!atomic_read(&data->disabled)))
 		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
 
-	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
@@ -55,7 +52,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 {
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
 	int cpu, pc;
 
 	if (!likely(tracer_enabled))
@@ -67,13 +63,11 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = ctx_trace->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
+	if (likely(!atomic_read(&data->disabled)))
 		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
 					   flags, pc);
 
-	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 