
Commit 596bca7d authored by Pavankumar Kondeti, committed by Satya Durga Srinivasu Prabhala

trace/sched: Fix compilation for 32 bit systems



do_div() expects the dividend to be a 64 bit type. We are passing
an unsigned long to do_div() from sched_load_avg_task and
sched_load_avg_cpu trace points. This breaks compilation on
a 32 bit system.

Change-Id: I9eb07dba1e62b68d5fc8d12e3f478b22c4ba5e0d
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Git-commit: a2ee4e794559cf2ba17dd19117d18787e7c2838d
Git-repo: https://android.googlesource.com/kernel/common/


[clingutla@codeaurora.org: Fixed trivial merge conflicts]
Signed-off-by: Lingutla Chandrasekhar <clingutla@codeaurora.org>
[satyap@codeaurora.org: update sched_load_se trace point and fix
trivial merge conflicts]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
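
For context, a minimal userspace sketch of the arithmetic involved (not part of the patch; SCHED_CAPACITY_SHIFT is 10 in the scheduler, and the demand/window values below are invented for illustration). Shifting the runnable sum up by SCHED_CAPACITY_SHIFT before dividing needs a 64-bit dividend, which is why handing do_div() an unsigned long lvalue breaks 32-bit builds; dividing instead by the window scaled down by the same shift yields the same quotient (up to rounding) and fits the new u32 trace field:

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* scheduler capacity scale shift */

int main(void)
{
	uint64_t prev_runnable_sum = 9500000;		/* example demand, ns */
	unsigned int sched_ravg_window = 20000000;	/* example 20 ms window */

	/* Old form: shift first, then divide.  The shifted value exceeds
	 * 32 bits, so the dividend must be 64 bit; do_div() also insists
	 * on a 64-bit lvalue, which is what broke the 32-bit build. */
	uint64_t old_style = (prev_runnable_sum << SCHED_CAPACITY_SHIFT)
				/ sched_ravg_window;

	/* New form used by the patch: scale the divisor down instead, so
	 * the quotient is the same (up to rounding) and fits a u32. */
	uint32_t new_style = (uint32_t)(prev_runnable_sum /
			(sched_ravg_window >> SCHED_CAPACITY_SHIFT));

	printf("old=%llu new=%u\n", (unsigned long long)old_style, new_style);
	return 0;
}
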
parent 6e3f4b1a
+20 −7
@@ -956,7 +956,7 @@ TRACE_EVENT(sched_load_avg_cpu,
 		__field(unsigned long,	load_avg)
 		__field(unsigned long,	util_avg)
 		__field(unsigned long,	util_avg_pelt)
-		__field(unsigned long,	util_avg_walt)
+		__field(u32,		util_avg_walt)
 	),
 
 	TP_fast_assign(
@@ -966,15 +966,15 @@ TRACE_EVENT(sched_load_avg_cpu,
 		__entry->util_avg_pelt  = cfs_rq->avg.util_avg;
 		__entry->util_avg_walt  = 0;
 #ifdef CONFIG_SCHED_WALT
-		__entry->util_avg_walt  =
-			cpu_rq(cpu)->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
-		do_div(__entry->util_avg_walt, sched_ravg_window);
+		__entry->util_avg_walt  = div64_ul(cpu_rq(cpu)->prev_runnable_sum,
+					  sched_ravg_window >> SCHED_CAPACITY_SHIFT);
+
 		if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
 			__entry->util_avg       = __entry->util_avg_walt;
 #endif
 	),
 
-	TP_printk("cpu=%d load_avg=%lu util_avg=%lu util_avg_pelt=%lu util_avg_walt=%lu",
+	TP_printk("cpu=%d load_avg=%lu util_avg=%lu util_avg_pelt=%lu util_avg_walt=%u",
 		__entry->cpu, __entry->load_avg, __entry->util_avg,
 		__entry->util_avg_pelt, __entry->util_avg_walt)
 );
@@ -998,6 +998,8 @@ TRACE_EVENT(sched_load_se,
 		__field(	unsigned long,	load			      )
 		__field(	unsigned long,	rbl_load		      )
 		__field(	unsigned long,	util			      )
+		__field(	unsigned long,	util_pelt		      )
+		__field(	u32,		util_walt		      )
 	),
 
 	TP_fast_assign(
@@ -1013,11 +1015,22 @@ TRACE_EVENT(sched_load_se,
 		__entry->load = se->avg.load_avg;
 		__entry->rbl_load = se->avg.runnable_load_avg;
 		__entry->util = se->avg.util_avg;
+		__entry->util_pelt  = __entry->util;
+		__entry->util_walt  = 0;
+#ifdef CONFIG_SCHED_WALT
+		if (!se->my_q) {
+			struct task_struct *p = container_of(se, struct task_struct, se);
+			__entry->util_walt = p->ravg.demand / (sched_ravg_window >> SCHED_CAPACITY_SHIFT);
+			if (!walt_disabled && sysctl_sched_use_walt_task_util)
+				__entry->util = __entry->util_walt;
+		}
+#endif
 	),
 
-	TP_printk("cpu=%d path=%s comm=%s pid=%d load=%lu rbl_load=%lu util=%lu",
+	TP_printk("cpu=%d path=%s comm=%s pid=%d load=%lu rbl_load=%lu util=%lu util_pelt=%lu util_walt=%u",
 		  __entry->cpu, __get_str(path), __entry->comm, __entry->pid,
-		  __entry->load, __entry->rbl_load, __entry->util)
+		  __entry->load, __entry->rbl_load, __entry->util,
+		  __entry->util_pelt, __entry->util_walt)
 );
 
 /*
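
For reference, div64_ul() (declared in include/linux/math64.h) takes the 64-bit dividend by value instead of dividing an lvalue in place the way do_div() does, so the trace code no longer needs a 64-bit local on 32-bit kernels. A rough userspace stand-in, purely to illustrate the contract (the in-kernel helper defers to the kernel's 64-by-32 division routines on 32-bit builds):

#include <stdint.h>

/* Illustrative stand-in for the kernel's div64_ul(); not the real
 * implementation.  The point is the shape of the call: the dividend is
 * passed by value as a 64-bit integer, the divisor as unsigned long. */
static inline uint64_t div64_ul_like(uint64_t dividend, unsigned long divisor)
{
	return dividend / divisor;
}

/* Mirrors the call shape used in the hunks above:
 *   util = div64_ul(prev_runnable_sum,
 *                   sched_ravg_window >> SCHED_CAPACITY_SHIFT);
 */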