
Commit 182e9f5f authored by Steven Rostedt, committed by Ingo Molnar

ftrace: insert in the ftrace_preempt_disable()/enable() functions



Impact: use new, consolidated APIs in ftrace plugins

This patch replaces the schedule safe preempt disable code with the
ftrace_preempt_disable() and ftrace_preempt_enable() safe functions.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 8f0a056f
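
For reference, the consolidated ftrace_preempt_disable()/ftrace_preempt_enable() pair was introduced by the parent commit (8f0a056f). The sketch below reconstructs what the helpers do from the pattern each hunk removes; treat the exact location (kernel/trace/trace.h) and the inline qualifiers as assumptions rather than a verbatim copy of that commit:

/*
 * Schedule-safe preempt disable (sketch). Record whether a reschedule
 * was already pending, then disable preemption without recursing into
 * the function tracer.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/*
 * Re-enable preemption. If a reschedule was pending when preemption was
 * disabled, do not reschedule here; that avoids recursing into the
 * scheduler from a tracer callback.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

Each plugin below then brackets its tracing path with resched = ftrace_preempt_disable(); ... ftrace_preempt_enable(resched);, which is exactly the substitution the hunks make.
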
+9 −18
@@ -16,6 +16,8 @@
 #include <linux/list.h>
 #include <linux/fs.h>
 
+#include "trace.h"
+
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
@@ -1122,8 +1124,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 		return NULL;
 
 	/* If we are tracing schedule, we don't want to recurse */
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 
@@ -1154,10 +1155,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	return event;
 
  out:
-	if (resched)
-		preempt_enable_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 	return NULL;
 }
 

@@ -1199,12 +1197,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 	/*
 	 * Only the last preempt count needs to restore preemption.
 	 */
-	if (preempt_count() == 1) {
-		if (per_cpu(rb_need_resched, cpu))
-			preempt_enable_no_resched_notrace();
-		else
-			preempt_enable_notrace();
-	} else
+	if (preempt_count() == 1)
+		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
+	else
 		preempt_enable_no_resched_notrace();
 
 	return 0;
@@ -1237,8 +1232,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (atomic_read(&buffer->record_disabled))
 		return -EBUSY;
 
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 
@@ -1264,10 +1258,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	ret = 0;
  out:
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 
 	return ret;
 }
+2 −6
@@ -904,8 +904,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 		return;
 
 	pc = preempt_count();
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 	local_save_flags(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
@@ -915,10 +914,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 		trace_function(tr, data, ip, parent_ip, flags, pc);
 
 	atomic_dec(&data->disabled);
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
+2 −11
@@ -50,8 +50,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 		return;
 
 	pc = preempt_count();
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
@@ -81,15 +80,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
  out:
 	atomic_dec(&data->disabled);
 
-	/*
-	 * To prevent recursion from the scheduler, if the
-	 * resched flag was set before we entered, then
-	 * don't reschedule.
-	 */
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
+2 −6
@@ -107,8 +107,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
 	if (unlikely(!ftrace_enabled || stack_trace_disabled))
 		return;
 
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 	/* no atomic needed, we only modify this variable by this cpu */
@@ -120,10 +119,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
  out:
 	per_cpu(trace_active, cpu)--;
 	/* prevent recursion in schedule */
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =