Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 88a4216c authored by Ingo Molnar, committed by Thomas Gleixner
Browse files

ftrace: sched special



Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 36fc25a9
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -2138,6 +2138,8 @@ extern void
ftrace_wake_up_task(void *rq, struct task_struct *wakee,
		    struct task_struct *curr);
extern void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data);
extern void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
#else
static inline void
ftrace_ctx_switch(void *rq, struct task_struct *prev, struct task_struct *next)
@@ -2155,6 +2157,10 @@ ftrace_wake_up_task(void *rq, struct task_struct *wakee,
/* No-op fallback used when the tracer is not configured in (the #else branch). */
static inline void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
{
}
/*
 * No-op fallback for ftrace_special() when the tracer is not configured in,
 * so call sites (e.g. in sched code) compile away without #ifdefs.
 */
static inline void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
}
#endif

extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
+3 −0
Original line number Diff line number Diff line
@@ -1061,6 +1061,8 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
	if (!(this_sd->flags & SD_WAKE_AFFINE))
		return 0;

	ftrace_special(__LINE__, curr->se.avg_overlap, sync);
	ftrace_special(__LINE__, p->se.avg_overlap, -1);
	/*
	 * If the currently running task will sleep within
	 * a reasonable amount of time then attract this newly
@@ -1238,6 +1240,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
	if (unlikely(se == pse))
		return;

	ftrace_special(__LINE__, p->pid, se->last_wakeup);
	cfs_rq_of(pse)->next = pse;

	/*
+3 −3
Original line number Diff line number Diff line
@@ -1251,7 +1251,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
				 comm);
		break;
	case TRACE_SPECIAL:
		trace_seq_printf(s, " %ld %ld %ld\n",
		trace_seq_printf(s, "# %ld %ld %ld\n",
				 entry->special.arg1,
				 entry->special.arg2,
				 entry->special.arg3);
@@ -1335,7 +1335,7 @@ static int print_trace_fmt(struct trace_iterator *iter)
			return 0;
		break;
	case TRACE_SPECIAL:
		ret = trace_seq_printf(s, " %ld %ld %ld\n",
		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
				 entry->special.arg1,
				 entry->special.arg2,
				 entry->special.arg3);
@@ -1400,7 +1400,7 @@ static int print_raw_fmt(struct trace_iterator *iter)
		break;
	case TRACE_SPECIAL:
	case TRACE_STACK:
		ret = trace_seq_printf(s, " %ld %ld %ld\n",
		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
				 entry->special.arg1,
				 entry->special.arg2,
				 entry->special.arg3);
+24 −0
Original line number Diff line number Diff line
@@ -103,6 +103,30 @@ ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
	wakeup_sched_wakeup(wakee, curr);
}

/*
 * ftrace_special - record an ad-hoc "special" trace entry carrying three
 * caller-chosen values (the sched call sites in this commit pass __LINE__
 * plus scheduler debug data).
 *
 * Runs with interrupts disabled and bumps the per-cpu ->disabled counter
 * so that only the first, non-nested caller on this CPU writes an entry;
 * this guards against recursive tracing while the entry is logged.
 */
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/* Fast exit when the tracer is not currently active. */
	if (!tracer_enabled)
		return;

	/* Disable IRQs so the CPU (and thus the per-cpu data) stays stable. */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	/* Log only if we are the sole (non-recursive) tracer on this CPU. */
	if (likely(disabled == 1))
		__trace_special(tr, data, arg1, arg2, arg3);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;