Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fcecef89 authored by Pavankumar Kondeti
Browse files

tracing/sched: Clean up preemptoff/irqsoff extensions



The preemptoff/irqsoff tracers are extended to print the call stack
when preemption/irqs are disabled for more than a threshold. The
subsequent patches bring the same functionality in a GKI-friendly
manner. So, clean up the existing code that does this.

Change-Id: Id86c298d3a044f7c7e99170ea74f79adbe6786a4
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 8a429d00
Loading
Loading
Loading
Loading
+0 −10
Original line number Original line Diff line number Diff line
@@ -82,16 +82,6 @@ extern int sched_busy_hyst_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos);
			void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
#endif


#if defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PREEMPT_TRACER)
extern unsigned int sysctl_preemptoff_tracing_threshold_ns;
#endif
#if defined(CONFIG_PREEMPTIRQ_EVENTS) && defined(CONFIG_IRQSOFF_TRACER)
extern unsigned int sysctl_irqsoff_tracing_threshold_ns;
extern unsigned int sysctl_irqsoff_dmesg_output_enabled;
extern unsigned int sysctl_irqsoff_crash_sentinel_value;
extern unsigned int sysctl_irqsoff_crash_threshold_ns;
#endif

enum sched_tunable_scaling {
enum sched_tunable_scaling {
	SCHED_TUNABLESCALING_NONE,
	SCHED_TUNABLESCALING_NONE,
	SCHED_TUNABLESCALING_LOG,
	SCHED_TUNABLESCALING_LOG,
+0 −67
Original line number Original line Diff line number Diff line
@@ -62,73 +62,6 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
#define trace_preempt_disable_rcuidle(...)
#define trace_preempt_disable_rcuidle(...)
#endif
#endif


TRACE_EVENT(irqs_disable,

	TP_PROTO(u64 delta, unsigned long caddr0, unsigned long caddr1,
				unsigned long caddr2, unsigned long caddr3,
				unsigned long caddr4),

	TP_ARGS(delta, caddr0, caddr1, caddr2, caddr3, caddr4),

	TP_STRUCT__entry(
		__field(u64, delta)
		__field(void*, caddr0)
		__field(void*, caddr1)
		__field(void*, caddr2)
		__field(void*, caddr3)
		__field(void*, caddr4)
	),

	TP_fast_assign(
		__entry->delta = delta;
		__entry->caddr0 = (void *)caddr0;
		__entry->caddr1 = (void *)caddr1;
		__entry->caddr2 = (void *)caddr2;
		__entry->caddr3 = (void *)caddr3;
		__entry->caddr4 = (void *)caddr4;
	),

	TP_printk("delta=%llu(ns) Callers:(%ps<-%ps<-%ps<-%ps<-%ps)",
					__entry->delta, __entry->caddr0,
					__entry->caddr1, __entry->caddr2,
					__entry->caddr3, __entry->caddr4)
);

TRACE_EVENT(sched_preempt_disable,

	TP_PROTO(u64 delta, bool irqs_disabled,	unsigned long caddr0,
			unsigned long caddr1, unsigned long caddr2,
			unsigned long caddr3, unsigned long caddr4),

	TP_ARGS(delta, irqs_disabled, caddr0, caddr1, caddr2, caddr3, caddr4),

	TP_STRUCT__entry(
		__field(u64, delta)
		__field(bool, irqs_disabled)
		__field(void*, caddr0)
		__field(void*, caddr1)
		__field(void*, caddr2)
		__field(void*, caddr3)
		__field(void*, caddr4)
	),

	TP_fast_assign(
		__entry->delta = delta;
		__entry->irqs_disabled = irqs_disabled;
		__entry->caddr0 = (void *)caddr0;
		__entry->caddr1 = (void *)caddr1;
		__entry->caddr2 = (void *)caddr2;
		__entry->caddr3 = (void *)caddr3;
		__entry->caddr4 = (void *)caddr4;
	),

	TP_printk("delta=%llu(ns) irqs_d=%d Callers:(%ps<-%ps<-%ps<-%ps<-%ps)",
				__entry->delta, __entry->irqs_disabled,
				__entry->caddr0, __entry->caddr1,
				__entry->caddr2, __entry->caddr3,
				__entry->caddr4)
);

#endif /* _TRACE_PREEMPTIRQ_H */
#endif /* _TRACE_PREEMPTIRQ_H */


#include <trace/define_trace.h>
#include <trace/define_trace.h>
+0 −46
Original line number Original line Diff line number Diff line
@@ -140,9 +140,6 @@ static int ten_thousand = 10000;
#ifdef CONFIG_PERF_EVENTS
#ifdef CONFIG_PERF_EVENTS
static int six_hundred_forty_kb = 640 * 1024;
static int six_hundred_forty_kb = 640 * 1024;
#endif
#endif
static unsigned int __maybe_unused half_million = 500000;
static unsigned int __maybe_unused one_hundred_million = 100000000;
static unsigned int __maybe_unused one_million = 1000000;
static int __maybe_unused max_kswapd_threads = MAX_KSWAPD_THREADS;
static int __maybe_unused max_kswapd_threads = MAX_KSWAPD_THREADS;


#ifdef CONFIG_SCHED_WALT
#ifdef CONFIG_SCHED_WALT
@@ -340,49 +337,6 @@ static struct ctl_table kern_table[] = {
		.mode		= 0644,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
		.proc_handler	= proc_dointvec,
	},
	},
#if defined(CONFIG_PREEMPT_TRACER) && defined(CONFIG_PREEMPTIRQ_EVENTS)
	{
		.procname	= "preemptoff_tracing_threshold_ns",
		.data		= &sysctl_preemptoff_tracing_threshold_ns,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPTIRQ_EVENTS)
	{
		.procname	= "irqsoff_tracing_threshold_ns",
		.data		= &sysctl_irqsoff_tracing_threshold_ns,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= &half_million,
		.extra2		= &one_hundred_million,
	},
	{
		.procname	= "irqsoff_dmesg_output_enabled",
		.data		= &sysctl_irqsoff_dmesg_output_enabled,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "irqsoff_crash_sentinel_value",
		.data		= &sysctl_irqsoff_crash_sentinel_value,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "irqsoff_crash_threshold_ns",
		.data		= &sysctl_irqsoff_crash_threshold_ns,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= &one_million,
		.extra2		= &one_hundred_million,
	},
#endif
#ifdef CONFIG_SCHED_WALT
#ifdef CONFIG_SCHED_WALT
	{
	{
		.procname	= "sched_user_hint",
		.procname	= "sched_user_hint",
+0 −135
Original line number Original line Diff line number Diff line
@@ -607,70 +607,12 @@ static void irqsoff_tracer_stop(struct trace_array *tr)
}
}


#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PREEMPTIRQ_EVENTS
#define IRQSOFF_SENTINEL 0x0fffDEAD
/*
 * irqsoff stack tracing threshold in ns.
 * default: 5ms
 */
unsigned int sysctl_irqsoff_tracing_threshold_ns = 5000000UL;
/*
 * Enable irqsoff tracing to dmesg
 */
unsigned int sysctl_irqsoff_dmesg_output_enabled;
/*
 * Sentinel value to prevent unnecessary irqsoff crash
 */
unsigned int sysctl_irqsoff_crash_sentinel_value;
/*
 * Irqsoff warning threshold to trigger crash
 */
unsigned int sysctl_irqsoff_crash_threshold_ns = 10000000UL;

struct irqsoff_store {
	u64 ts;
	unsigned long caddr[5];
};

static DEFINE_PER_CPU(struct irqsoff_store, the_irqsoff);
#endif /* CONFIG_PREEMPTIRQ_EVENTS */

/*
/*
 * We are only interested in hardirq on/off events:
 * We are only interested in hardirq on/off events:
 */
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
{
	unsigned int pc = preempt_count();
	unsigned int pc = preempt_count();
#ifdef CONFIG_PREEMPTIRQ_EVENTS
	struct irqsoff_store *is;
	u64 delta;

	lockdep_off();
	is = &per_cpu(the_irqsoff, raw_smp_processor_id());
	delta = sched_clock() - is->ts;

	if (!is_idle_task(current) &&
			delta > sysctl_irqsoff_tracing_threshold_ns) {
		trace_irqs_disable(delta, is->caddr[0], is->caddr[1],
					is->caddr[2], is->caddr[3],
					is->caddr[4]);
		if (sysctl_irqsoff_dmesg_output_enabled == IRQSOFF_SENTINEL)
			printk_deferred(KERN_ERR "D=%llu C:(%ps<-%ps<-%ps<-%ps)\n",
				delta, is->caddr[0], is->caddr[1],
					is->caddr[2], is->caddr[3]);
		if (sysctl_irqsoff_crash_sentinel_value == IRQSOFF_SENTINEL &&
			delta > sysctl_irqsoff_crash_threshold_ns) {
			printk_deferred(KERN_ERR
			"delta=%llu(ns) > crash_threshold=%llu(ns) Task=%s\n",
				delta, sysctl_irqsoff_crash_threshold_ns,
					current->comm);
			BUG_ON(1);
		}
	}

	is->ts = 0;
	lockdep_on();
#endif /* CONFIG_PREEMPTIRQ_EVENTS */


	if (!preempt_trace(pc) && irq_trace())
	if (!preempt_trace(pc) && irq_trace())
		stop_critical_timing(a0, a1, pc);
		stop_critical_timing(a0, a1, pc);
@@ -680,19 +622,6 @@ NOKPROBE_SYMBOL(tracer_hardirqs_on);
void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
{
	unsigned int pc = preempt_count();
	unsigned int pc = preempt_count();
#ifdef CONFIG_PREEMPTIRQ_EVENTS
	struct irqsoff_store *is;

	lockdep_off();
	is = &per_cpu(the_irqsoff, raw_smp_processor_id());
	is->ts = sched_clock();
	is->caddr[0] = CALLER_ADDR1;
	is->caddr[1] = CALLER_ADDR2;
	is->caddr[2] = CALLER_ADDR3;
	is->caddr[3] = CALLER_ADDR4;
	is->caddr[4] = CALLER_ADDR5;
	lockdep_on();
#endif /* CONFIG_PREEMPTIRQ_EVENTS */


	if (!preempt_trace(pc) && irq_trace())
	if (!preempt_trace(pc) && irq_trace())
		start_critical_timing(a0, a1, pc);
		start_critical_timing(a0, a1, pc);
@@ -733,57 +662,9 @@ static struct tracer irqsoff_tracer __read_mostly =
#endif /*  CONFIG_IRQSOFF_TRACER */
#endif /*  CONFIG_IRQSOFF_TRACER */


#ifdef CONFIG_PREEMPT_TRACER
#ifdef CONFIG_PREEMPT_TRACER
#ifdef CONFIG_PREEMPTIRQ_EVENTS
/*
 * preemptoff stack tracing threshold in ns.
 * default: 1ms
 */
unsigned int sysctl_preemptoff_tracing_threshold_ns = 1000000UL;

struct preempt_store {
	u64 ts;
	unsigned long caddr[5];
	bool irqs_disabled;
	int pid;
	unsigned long ncsw;
};

static DEFINE_PER_CPU(struct preempt_store, the_ps);
#endif /* CONFIG_PREEMPTIRQ_EVENTS */

void tracer_preempt_on(unsigned long a0, unsigned long a1)
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
{
	int pc = preempt_count();
	int pc = preempt_count();
#ifdef CONFIG_PREEMPTIRQ_EVENTS
	struct preempt_store *ps;
	u64 delta = 0;

	lockdep_off();
	ps = &per_cpu(the_ps, raw_smp_processor_id());

	/*
	 * schedule() calls __schedule() with preemption disabled.
	 * if we had entered idle and exiting idle now, we think
	 * preemption is disabled the whole time. Detect this by
	 * checking if the preemption is disabled across the same
	 * task. There is a possibility that the same task is scheduled
	 * after idle. To rule out this possibility, compare the
	 * context switch count also.
	 */
	if (ps->ts && ps->pid == current->pid && (ps->ncsw ==
			current->nvcsw + current->nivcsw))
		delta = sched_clock() - ps->ts;
	/*
	 * Trace preempt disable stack if preemption
	 * is disabled for more than the threshold.
	 */
	if (delta > sysctl_preemptoff_tracing_threshold_ns)
		trace_sched_preempt_disable(delta, ps->irqs_disabled,
				ps->caddr[0], ps->caddr[1],
				ps->caddr[2], ps->caddr[3], ps->caddr[4]);
	ps->ts = 0;
	lockdep_on();
#endif /* CONFIG_PREEMPTIRQ_EVENTS */


	if (preempt_trace(pc) && !irq_trace())
	if (preempt_trace(pc) && !irq_trace())
		stop_critical_timing(a0, a1, pc);
		stop_critical_timing(a0, a1, pc);
@@ -792,22 +673,6 @@ void tracer_preempt_on(unsigned long a0, unsigned long a1)
void tracer_preempt_off(unsigned long a0, unsigned long a1)
void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
{
	int pc = preempt_count();
	int pc = preempt_count();
#ifdef CONFIG_PREEMPTIRQ_EVENTS
	struct preempt_store *ps;

	lockdep_off();
	ps = &per_cpu(the_ps, raw_smp_processor_id());
	ps->ts = sched_clock();
	ps->caddr[0] = CALLER_ADDR1;
	ps->caddr[1] = CALLER_ADDR2;
	ps->caddr[2] = CALLER_ADDR3;
	ps->caddr[3] = CALLER_ADDR4;
	ps->caddr[4] = CALLER_ADDR5;
	ps->irqs_disabled = irqs_disabled();
	ps->pid = current->pid;
	ps->ncsw = current->nvcsw + current->nivcsw;
	lockdep_on();
#endif /* CONFIG_PREEMPTIRQ_EVENTS */


	if (preempt_trace(pc) && !irq_trace())
	if (preempt_trace(pc) && !irq_trace())
		start_critical_timing(a0, a1, pc);
		start_critical_timing(a0, a1, pc);