Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4e655519 authored by Ingo Molnar, committed by Thomas Gleixner
Browse files

ftrace: sched tracer, trace full rbtree



Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 4c1f4d4f
Loading
Loading
Loading
Loading
+23 −9
Original line number Diff line number Diff line
@@ -2119,20 +2119,34 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
extern void
ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next);
ftrace_ctx_switch(void *rq, struct task_struct *prev, struct task_struct *next);
extern void
ftrace_wake_up_task(void *rq, struct task_struct *wakee,
		    struct task_struct *curr);
extern void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data);
extern void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3);
#else
static inline void
ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
ftrace_ctx_switch(void *rq, struct task_struct *prev, struct task_struct *next)
{
}
/* No-op stub when CONFIG_CONTEXT_SWITCH_TRACER is not configured. */
static inline void
sched_trace_special(unsigned long p1, unsigned long p2, unsigned long p3)
{
}
/* No-op stub when CONFIG_CONTEXT_SWITCH_TRACER is not configured. */
static inline void
ftrace_wake_up_task(void *rq, struct task_struct *wakee,
		    struct task_struct *curr)
{
}
/* No-op stub when CONFIG_CONTEXT_SWITCH_TRACER is not configured. */
static inline void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
{
}
#endif

#ifdef CONFIG_SCHED_TRACER
extern void
ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr);
#else
static inline void
ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr)
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
}
#endif
+32 −3
Original line number Diff line number Diff line
@@ -2394,6 +2394,35 @@ static int sched_balance_self(int cpu, int flag)

#endif /* CONFIG_SMP */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER

/*
 * Emit a TRACE_SPECIAL entry (pid, vruntime, sum_exec_runtime) for every
 * task currently queued on this runqueue's CFS rbtree.
 *
 * @__rq:   struct rq * of the runqueue to walk (typed void * so the
 *          declaration in sched.h need not expose scheduler internals)
 * @__tr:   opaque trace_array pointer, forwarded to __trace_special()
 * @__data: opaque trace_array_cpu pointer, forwarded to __trace_special()
 *
 * NOTE(review): callers appear to run with irqs off and data->disabled
 * elevated, keeping the tree stable during the walk -- verify at call sites.
 */
void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
{
	struct sched_entity *se;
	struct task_struct *p;
	struct rb_node *curr;
	struct rq *rq = __rq;

	/*
	 * Iterate with a for loop so the cursor advances on EVERY pass:
	 * the previous while-loop hit "continue" for a non-task entity
	 * before calling rb_next(), looping forever on group entities.
	 */
	for (curr = first_fair(&rq->cfs); curr; curr = rb_next(curr)) {
		se = rb_entry(curr, struct sched_entity, run_node);
		/* Only tasks are traced; skip group scheduling entities. */
		if (!entity_is_task(se))
			continue;

		p = task_of(se);

		__trace_special(__tr, __data,
			      p->pid, p->se.vruntime, p->se.sum_exec_runtime);
	}
}

#endif

/***
 * try_to_wake_up - wake up a thread
 * @p: the to-be-woken-up thread
@@ -2468,7 +2497,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)

out_activate:
#endif /* CONFIG_SMP */
	ftrace_wake_up_task(p, rq->curr);
	ftrace_wake_up_task(rq, p, rq->curr);
	schedstat_inc(p, se.nr_wakeups);
	if (sync)
		schedstat_inc(p, se.nr_wakeups_sync);
@@ -2613,7 +2642,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
		p->sched_class->task_new(rq, p);
		inc_nr_running(rq);
	}
	ftrace_wake_up_task(p, rq->curr);
	ftrace_wake_up_task(rq, p, rq->curr);
	check_preempt_curr(rq, p);
#ifdef CONFIG_SMP
	if (p->sched_class->task_wake_up)
@@ -2786,7 +2815,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
	struct mm_struct *mm, *oldmm;

	prepare_task_switch(rq, prev, next);
	ftrace_ctx_switch(prev, next);
	ftrace_ctx_switch(rq, prev, next);
	mm = next->mm;
	oldmm = prev->active_mm;
	/*
+23 −32
Original line number Diff line number Diff line
@@ -68,6 +68,17 @@ static int max_tracer_type_len;
static DEFINE_MUTEX(trace_types_lock);
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;

/*
 * FIXME: where should this be called?
 */
/*
 * Wake any readers sleeping on trace_wait, unless the TRACE_ITER_BLOCK
 * flag is set in trace_flags (in which case writers never wake readers).
 */
void trace_wake_up(void)
{
	if (trace_flags & TRACE_ITER_BLOCK)
		return;

	wake_up(&trace_wait);
}

#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))

static int __init set_nr_entries(char *str)
@@ -103,18 +114,6 @@ enum trace_flag_type {
	TRACE_FLAG_SOFTIRQ		= 0x08,
};

enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
};

#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

@@ -132,8 +131,6 @@ static const char *trace_options[] = {
	NULL
};

static unsigned trace_flags = TRACE_ITER_PRINT_PARENT;

static DEFINE_SPINLOCK(ftrace_max_lock);

/*
@@ -660,9 +657,6 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
	entry->fn.ip		= ip;
	entry->fn.parent_ip	= parent_ip;
	spin_unlock_irqrestore(&data->lock, irq_flags);

	if (!(trace_flags & TRACE_ITER_BLOCK))
		wake_up(&trace_wait);
}

void
@@ -673,10 +667,14 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
		trace_function(tr, data, ip, parent_ip, flags);
}

#ifdef CONFIG_CONTEXT_SWITCH_TRACER

void
trace_special(struct trace_array *tr, struct trace_array_cpu *data,
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array_cpu *data = __data;
	struct trace_array *tr = __tr;
	struct trace_entry *entry;
	unsigned long irq_flags;

@@ -688,11 +686,10 @@ trace_special(struct trace_array *tr, struct trace_array_cpu *data,
	entry->special.arg2	= arg2;
	entry->special.arg3	= arg3;
	spin_unlock_irqrestore(&data->lock, irq_flags);

	if (!(trace_flags & TRACE_ITER_BLOCK))
		wake_up(&trace_wait);
}

#endif

void __trace_stack(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long flags,
@@ -739,9 +736,6 @@ tracing_sched_switch_trace(struct trace_array *tr,
	entry->ctx.next_prio	= next->prio;
	__trace_stack(tr, data, flags, 4);
	spin_unlock_irqrestore(&data->lock, irq_flags);

	if (!(trace_flags & TRACE_ITER_BLOCK))
		wake_up(&trace_wait);
}

void
@@ -765,9 +759,6 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
	entry->ctx.next_prio	= wakee->prio;
	__trace_stack(tr, data, flags, 5);
	spin_unlock_irqrestore(&data->lock, irq_flags);

	if (!(trace_flags & TRACE_ITER_BLOCK))
		wake_up(&trace_wait);
}

#ifdef CONFIG_FTRACE
@@ -1258,7 +1249,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
				 comm);
		break;
	case TRACE_SPECIAL:
		trace_seq_printf(s, " %lx %lx %lx\n",
		trace_seq_printf(s, " %ld %ld %ld\n",
				 entry->special.arg1,
				 entry->special.arg2,
				 entry->special.arg3);
@@ -1344,7 +1335,7 @@ static int print_trace_fmt(struct trace_iterator *iter)
			return 0;
		break;
	case TRACE_SPECIAL:
		ret = trace_seq_printf(s, " %lx %lx %lx\n",
		ret = trace_seq_printf(s, " %ld %ld %ld\n",
				 entry->special.arg1,
				 entry->special.arg2,
				 entry->special.arg3);
@@ -1409,7 +1400,7 @@ static int print_raw_fmt(struct trace_iterator *iter)
		break;
	case TRACE_SPECIAL:
	case TRACE_STACK:
		ret = trace_seq_printf(s, " %lx %lx %lx\n",
		ret = trace_seq_printf(s, " %ld %ld %ld\n",
				 entry->special.arg1,
				 entry->special.arg2,
				 entry->special.arg3);
+14 −0
Original line number Diff line number Diff line
@@ -274,4 +274,18 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace,

extern void *head_page(struct trace_array_cpu *data);

extern unsigned long trace_flags;

/*
 * Bit flags held in the global trace_flags word, selecting trace output
 * options.  The first three symbol flags are combined by
 * TRACE_ITER_SYM_MASK; TRACE_ITER_BLOCK, when set, suppresses the reader
 * wakeup performed by trace_wake_up().  The remaining flags select output
 * formats -- presumably raw/hex/binary/verbose rendering; confirm against
 * the print routines.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
};

#endif /* _LINUX_KERNEL_TRACE_H */
+16 −8
Original line number Diff line number Diff line
@@ -18,7 +18,7 @@ static struct trace_array *ctx_trace;
static int __read_mostly	tracer_enabled;

static void
ctx_switch_func(struct task_struct *prev, struct task_struct *next)
ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
@@ -34,14 +34,17 @@ ctx_switch_func(struct task_struct *prev, struct task_struct *next)
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
	if (likely(disabled == 1)) {
		tracing_sched_switch_trace(tr, data, prev, next, flags);
		ftrace_all_fair_tasks(__rq, tr, data);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static void wakeup_func(struct task_struct *wakee, struct task_struct *curr)
static void
wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
@@ -57,14 +60,18 @@ static void wakeup_func(struct task_struct *wakee, struct task_struct *curr)
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
	if (likely(disabled == 1)) {
		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
		ftrace_all_fair_tasks(__rq, tr, data);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
void
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
		  struct task_struct *next)
{
	tracing_record_cmdline(prev);

@@ -72,7 +79,7 @@ void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	ctx_switch_func(prev, next);
	ctx_switch_func(__rq, prev, next);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
@@ -81,11 +88,12 @@ void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
}

void
ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr)
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
		    struct task_struct *curr)
{
	tracing_record_cmdline(curr);

	wakeup_func(wakee, curr);
	wakeup_func(__rq, wakee, curr);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):