
Commit 82e04af4 authored by Frederic Weisbecker

tracing: Move sched event insertion helpers in the sched switch tracer file



The sched event helpers which insert the sched switch and wakeup
events into the ring buffer currently reside in trace.c. But that file
is quite overloaded, and the right place for these helpers is the
sched switch tracer file.

So move them to trace_sched_switch.c.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
parent c0a0d0d3
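
For context, these helpers are invoked from the scheduler tracepoint probes that live alongside them after this move. Below is a rough sketch of such a call site, modeled on the probe_sched_switch() context visible in the diff; the ctx_trace trace array and the sched_stopped check are assumptions for illustration, not part of this commit:

	/*
	 * Sketch of a probe calling the moved helper. ctx_trace
	 * (the tracer's trace_array) and the enable check are
	 * assumptions; only the helper's signature comes from
	 * the diff below.
	 */
	static void
	probe_sched_switch(struct rq *__rq, struct task_struct *prev,
				struct task_struct *next)
	{
		unsigned long flags;
		int pc;

		if (unlikely(sched_stopped))
			return;

		pc = preempt_count();
		local_irq_save(flags);
		/* write a TRACE_CTX entry for this context switch */
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
		local_irq_restore(flags);
	}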
kernel/trace/trace.c  +0 −56

@@ -1105,62 +1105,6 @@ __trace_special(void *__tr, void *__data,
 	ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
 }
 
-void
-tracing_sched_switch_trace(struct trace_array *tr,
-			   struct task_struct *prev,
-			   struct task_struct *next,
-			   unsigned long flags, int pc)
-{
-	struct ftrace_event_call *call = &event_context_switch;
-	struct ring_buffer_event *event;
-	struct ctx_switch_entry *entry;
-
-	event = trace_buffer_lock_reserve(tr, TRACE_CTX,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return;
-	entry	= ring_buffer_event_data(event);
-	entry->prev_pid			= prev->pid;
-	entry->prev_prio		= prev->prio;
-	entry->prev_state		= prev->state;
-	entry->next_pid			= next->pid;
-	entry->next_prio		= next->prio;
-	entry->next_state		= next->state;
-	entry->next_cpu	= task_cpu(next);
-
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		trace_buffer_unlock_commit(tr, event, flags, pc);
-}
-
-void
-tracing_sched_wakeup_trace(struct trace_array *tr,
-			   struct task_struct *wakee,
-			   struct task_struct *curr,
-			   unsigned long flags, int pc)
-{
-	struct ftrace_event_call *call = &event_wakeup;
-	struct ring_buffer_event *event;
-	struct ctx_switch_entry *entry;
-
-	event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return;
-	entry	= ring_buffer_event_data(event);
-	entry->prev_pid			= curr->pid;
-	entry->prev_prio		= curr->prio;
-	entry->prev_state		= curr->state;
-	entry->next_pid			= wakee->pid;
-	entry->next_prio		= wakee->prio;
-	entry->next_state		= wakee->state;
-	entry->next_cpu			= task_cpu(wakee);
-
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
-	ftrace_trace_stack(tr, flags, 6, pc);
-	ftrace_trace_userstack(tr, flags, pc);
-}
-
 void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
kernel/trace/trace_sched_switch.c  +57 −0

@@ -20,6 +20,34 @@ static int sched_ref;
 static DEFINE_MUTEX(sched_register_mutex);
 static int			sched_stopped;
 
+
+void
+tracing_sched_switch_trace(struct trace_array *tr,
+			   struct task_struct *prev,
+			   struct task_struct *next,
+			   unsigned long flags, int pc)
+{
+	struct ftrace_event_call *call = &event_context_switch;
+	struct ring_buffer_event *event;
+	struct ctx_switch_entry *entry;
+
+	event = trace_buffer_lock_reserve(tr, TRACE_CTX,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry	= ring_buffer_event_data(event);
+	entry->prev_pid			= prev->pid;
+	entry->prev_prio		= prev->prio;
+	entry->prev_state		= prev->state;
+	entry->next_pid			= next->pid;
+	entry->next_prio		= next->prio;
+	entry->next_state		= next->state;
+	entry->next_cpu	= task_cpu(next);
+
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		trace_buffer_unlock_commit(tr, event, flags, pc);
+}
+
 static void
 probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 			struct task_struct *next)
@@ -49,6 +77,35 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	local_irq_restore(flags);
 }
 
+void
+tracing_sched_wakeup_trace(struct trace_array *tr,
+			   struct task_struct *wakee,
+			   struct task_struct *curr,
+			   unsigned long flags, int pc)
+{
+	struct ftrace_event_call *call = &event_wakeup;
+	struct ring_buffer_event *event;
+	struct ctx_switch_entry *entry;
+
+	event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry	= ring_buffer_event_data(event);
+	entry->prev_pid			= curr->pid;
+	entry->prev_prio		= curr->prio;
+	entry->prev_state		= curr->state;
+	entry->next_pid			= wakee->pid;
+	entry->next_prio		= wakee->prio;
+	entry->next_state		= wakee->state;
+	entry->next_cpu			= task_cpu(wakee);
+
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
+	ftrace_trace_stack(tr, flags, 6, pc);
+	ftrace_trace_userstack(tr, flags, pc);
+}
+
 static void
 probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
 {
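
Note that only the definitions move; trace.c and other tracers (for instance the wakeup tracer) can keep calling these helpers as long as their prototypes stay visible in a shared header. Presumably kernel/trace/trace.h carries declarations along these lines; they are not shown in this diff, so treat the exact form as an assumption:

	/* Assumed declarations in kernel/trace/trace.h; not part of this diff. */
	void tracing_sched_switch_trace(struct trace_array *tr,
					struct task_struct *prev,
					struct task_struct *next,
					unsigned long flags, int pc);
	void tracing_sched_wakeup_trace(struct trace_array *tr,
					struct task_struct *wakee,
					struct task_struct *curr,
					unsigned long flags, int pc);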