Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9ef4c50b authored by Satya Durga Srinivasu Prabhala, committed by Gerrit - the friendly Code Review server
Browse files

sched: Add snapshot of preemption and IRQs disable callers



This snapshot is taken from msm-4.19 as of
commit 72395648aa0ad18 ("sched: core: Fix usage of cpu core group mask").

Change-Id: I8dc6933a1e3a0835f19c7802930f974d442082a8
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent d7a8a676
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -72,6 +72,13 @@ sched_ravg_window_handler(struct ctl_table *table, int write,
			loff_t *ppos);
#endif

#if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_DEBUG_PREEMPT)
/*
 * Minimum preempt-disabled duration (ns) before the disable-site stack is
 * traced; tunable via /proc/sys/kernel/preemptoff_tracing_threshold_ns.
 */
extern unsigned int sysctl_preemptoff_tracing_threshold_ns;
#endif
#if defined(CONFIG_PREEMPTIRQ_EVENTS) && defined(CONFIG_IRQSOFF_TRACER)
/*
 * Presumably the analogous threshold for IRQ-disabled sections (the
 * consumer is not visible in this hunk) — exposed as
 * /proc/sys/kernel/irqsoff_tracing_threshold_ns.
 */
extern unsigned int sysctl_irqsoff_tracing_threshold_ns;
#endif

enum sched_tunable_scaling {
	SCHED_TUNABLESCALING_NONE,
	SCHED_TUNABLESCALING_LOG,
+28 −0
Original line number Diff line number Diff line
@@ -62,6 +62,34 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
#define trace_preempt_disable_rcuidle(...)
#endif

/*
 * irqs_disable - reports an IRQ-disabled section together with its duration
 * and the four most recent caller return addresses of the site that
 * disabled interrupts.
 *
 * NOTE(review): presumably emitted only when the section exceeds
 * sysctl_irqsoff_tracing_threshold_ns — the call site is not in this hunk,
 * confirm against the emitter.
 */
TRACE_EVENT(irqs_disable,

	TP_PROTO(u64 delta, unsigned long caddr0, unsigned long caddr1,
				unsigned long caddr2, unsigned long caddr3),

	TP_ARGS(delta, caddr0, caddr1, caddr2, caddr3),

	/* delta is the disabled duration in ns; caddr0..3 are caller PCs */
	TP_STRUCT__entry(
		__field(u64, delta)
		__field(void*, caddr0)
		__field(void*, caddr1)
		__field(void*, caddr2)
		__field(void*, caddr3)
	),

	TP_fast_assign(
		__entry->delta = delta;
		__entry->caddr0 = (void *)caddr0;
		__entry->caddr1 = (void *)caddr1;
		__entry->caddr2 = (void *)caddr2;
		__entry->caddr3 = (void *)caddr3;
	),

	/* %ps symbolizes each stored address */
	TP_printk("delta=%llu(ns) Callers:(%ps<-%ps<-%ps<-%ps)", __entry->delta,
					__entry->caddr0, __entry->caddr1,
					__entry->caddr2, __entry->caddr3)
);

#endif /* _TRACE_PREEMPTIRQ_H */

#include <trace/define_trace.h>
+32 −0
Original line number Diff line number Diff line
@@ -671,6 +671,38 @@ DECLARE_TRACE(sched_overutilized_tp,
	TP_PROTO(struct root_domain *rd, bool overutilized),
	TP_ARGS(rd, overutilized));

/*
 * sched_preempt_disable - reports a preempt-disabled section whose duration
 * exceeded sysctl_preemptoff_tracing_threshold_ns (gated by the caller in
 * preempt_latency_stop()), with the duration, whether IRQs were also
 * disabled at the time, and the four caller return addresses captured when
 * preemption was disabled.
 */
TRACE_EVENT(sched_preempt_disable,

	TP_PROTO(u64 delta, bool irqs_disabled,
			unsigned long caddr0, unsigned long caddr1,
			unsigned long caddr2, unsigned long caddr3),

	TP_ARGS(delta, irqs_disabled, caddr0, caddr1, caddr2, caddr3),

	/* delta in ns; caddr0..3 are the preempt-disable site's caller PCs */
	TP_STRUCT__entry(
		__field(u64, delta)
		__field(bool, irqs_disabled)
		__field(void*, caddr0)
		__field(void*, caddr1)
		__field(void*, caddr2)
		__field(void*, caddr3)
	),

	TP_fast_assign(
		__entry->delta = delta;
		__entry->irqs_disabled = irqs_disabled;
		__entry->caddr0 = (void *)caddr0;
		__entry->caddr1 = (void *)caddr1;
		__entry->caddr2 = (void *)caddr2;
		__entry->caddr3 = (void *)caddr3;
	),

	/* %ps symbolizes each stored address */
	TP_printk("delta=%llu(ns) irqs_d=%d Callers:(%ps<-%ps<-%ps<-%ps)",
				__entry->delta, __entry->irqs_disabled,
				__entry->caddr0, __entry->caddr1,
				__entry->caddr2, __entry->caddr3)
);

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
+54 −1
Original line number Diff line number Diff line
@@ -3847,17 +3847,55 @@ static inline void sched_tick_stop(int cpu) { }

#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
/*
 * preemptoff stack tracing threshold in ns.
 * default: 1ms
 */
unsigned int sysctl_preemptoff_tracing_threshold_ns = 1000000UL;

/*
 * Per-CPU record of the most recent preempt-disable event, filled by
 * preempt_latency_start() and consumed by preempt_latency_stop().
 */
struct preempt_store {
	u64 ts;			/* sched_clock() at preempt-disable; 0 = reset */
	unsigned long caddr[4];	/* CALLER_ADDR0..3 of the disable site */
	bool irqs_disabled;	/* irqs_disabled() at the disable site */
};

DEFINE_PER_CPU(struct preempt_store, the_ps);

/*
 * This is only called from __schedule() upon context switch.
 *
 * schedule() calls __schedule() with preemption disabled.
 * if we had entered idle and exiting idle now, reset the preemption
 * tracking otherwise we may think preemption is disabled the whole time
 * when the non idle task re-enables the preemption in schedule().
 */
static inline void preempt_latency_reset(void)
{
	if (is_idle_task(this_rq()->curr))
		this_cpu_ptr(&the_ps)->ts = 0;
}

/*
 * If the value passed in is equal to the current preempt count
 * then we just disabled preemption. Start timing the latency.
 *
 * Records the timestamp, the four caller return addresses and the IRQ
 * state into this CPU's preempt_store so preempt_latency_stop() can later
 * compute the preempt-disabled duration and, if it exceeds
 * sysctl_preemptoff_tracing_threshold_ns, emit sched_preempt_disable.
 *
 * NOTE(review): CALLER_ADDR0..3 depend on this function's exact call
 * depth — do not wrap or out-line this code.
 */
static inline void preempt_latency_start(int val)
{
	int cpu = raw_smp_processor_id();
	struct preempt_store *ps = &per_cpu(the_ps, cpu);

	if (preempt_count() == val) {
		unsigned long ip = get_lock_parent_ip();
#ifdef CONFIG_DEBUG_PREEMPT
		/* remember the disable site for debug reporting */
		current->preempt_disable_ip = ip;
#endif
		ps->ts = sched_clock();
		ps->caddr[0] = CALLER_ADDR0;
		ps->caddr[1] = CALLER_ADDR1;
		ps->caddr[2] = CALLER_ADDR2;
		ps->caddr[3] = CALLER_ADDR3;
		ps->irqs_disabled = irqs_disabled();

		trace_preempt_off(CALLER_ADDR0, ip);
	}
}
@@ -3890,9 +3928,22 @@ NOKPROBE_SYMBOL(preempt_count_add);
 */
/*
 * Counterpart of preempt_latency_start(): when this decrement re-enables
 * preemption, compute how long it was disabled (0 if the timestamp was
 * reset while idle) and, past the sysctl threshold, report the recorded
 * disable-site callers via the sched_preempt_disable tracepoint.
 *
 * Defect fixed: the diff artifact left a stray duplicate
 * "if (preempt_count() == val)" line before the braced condition,
 * producing a redundant nested identical check — only one remains.
 */
static inline void preempt_latency_stop(int val)
{
	if (preempt_count() == val) {
		struct preempt_store *ps = &per_cpu(the_ps,
				raw_smp_processor_id());
		/* ts == 0 means preempt_latency_reset() cleared it in idle */
		u64 delta = ps->ts ? (sched_clock() - ps->ts) : 0;

		/*
		 * Trace preempt disable stack if preemption
		 * is disabled for more than the threshold.
		 */
		if (delta > sysctl_preemptoff_tracing_threshold_ns)
			trace_sched_preempt_disable(delta, ps->irqs_disabled,
						ps->caddr[0], ps->caddr[1],
						ps->caddr[2], ps->caddr[3]);
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
	}
}

void preempt_count_sub(int val)
{
@@ -3919,6 +3970,7 @@ NOKPROBE_SYMBOL(preempt_count_sub);
#else
/* No-op stubs when preempt-off latency tracking is not configured. */
static inline void preempt_latency_start(int val) { }
static inline void preempt_latency_stop(int val) { }
static inline void preempt_latency_reset(void) { }
#endif

static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
@@ -4153,6 +4205,7 @@ static void __sched notrace __schedule(bool preempt)
			prev->last_sleep_ts = wallclock;
#endif

		preempt_latency_reset();
		walt_update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
		walt_update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
		rq->nr_switches++;
+18 −0
Original line number Diff line number Diff line
@@ -339,6 +339,24 @@ static struct ctl_table kern_table[] = {
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_DEBUG_PREEMPT)
	/* Tunable threshold (ns) for tracing long preempt-disabled sections */
	{
		.procname	= "preemptoff_tracing_threshold_ns",
		.data		= &sysctl_preemptoff_tracing_threshold_ns,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPTIRQ_EVENTS)
	/* Tunable threshold (ns) for tracing long IRQ-disabled sections */
	{
		.procname	= "irqsoff_tracing_threshold_ns",
		.data		= &sysctl_irqsoff_tracing_threshold_ns,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
#ifdef CONFIG_SCHED_WALT
	{
		.procname	= "sched_user_hint",
Loading