Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b4002bec authored by Chris Redpath, committed by Todd Kjos
Browse files

ANDROID: Fixup 64/32-bit divide confusion for WALT configs



Builds cleanly for aarch64 and arm with and without
CONFIG_FAIR_GROUP_SCHED.

Bug: 72707388
Change-Id: Iafd2b8e2b1fb13837b760e3821610d67bfaa48aa
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
parent fb6faf04
Loading
Loading
Loading
Loading
+10 −7
Original line number Diff line number Diff line
@@ -696,6 +696,12 @@ extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int walt_ravg_window;
extern bool walt_disabled;

/*
 * walt_util() - scale a WALT demand/runnable sum into a utilisation value.
 *
 * Shifts @demand_sum up by SCHED_CAPACITY_SHIFT and divides by the WALT
 * window length, storing the result in @util_var cast to its own type.
 * do_div() is used (rather than a plain 64-bit '/') so the 64-by-32
 * division also builds cleanly on 32-bit arm, which has no native
 * 64-bit divide.
 *
 * @demand_sum is parenthesized so expression arguments bind correctly
 * against '<<', and the body is wrapped in do { } while (0) so the macro
 * behaves as a single statement after an unbraced if/else.
 */
#define walt_util(util_var, demand_sum) do {\
	u64 sum = (demand_sum) << SCHED_CAPACITY_SHIFT;\
	do_div(sum, walt_ravg_window);\
	(util_var) = (typeof(util_var))sum;\
	} while (0)
#endif

/*
@@ -722,9 +728,7 @@ TRACE_EVENT(sched_load_avg_cpu,
                __entry->util_avg_pelt  = cfs_rq->avg.util_avg;
                __entry->util_avg_walt  = 0;
#ifdef CONFIG_SCHED_WALT
                __entry->util_avg_walt  =
                                cpu_rq(cpu)->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
                do_div(__entry->util_avg_walt, walt_ravg_window);
                walt_util(__entry->util_avg_walt, cpu_rq(cpu)->prev_runnable_sum);
                if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
                        __entry->util_avg = __entry->util_avg_walt;
#endif
@@ -775,8 +779,7 @@ TRACE_EVENT(sched_load_se,
#ifdef CONFIG_SCHED_WALT
		if (!se->my_q) {
			struct task_struct *p = container_of(se, struct task_struct, se);
			__entry->util_walt = p->ravg.demand;
			do_div(__entry->util_walt, walt_ravg_window >> SCHED_CAPACITY_SHIFT);
			walt_util(__entry->util_walt, p->ravg.demand);
			if (!walt_disabled && sysctl_sched_use_walt_task_util)
				__entry->util = __entry->util_walt;
		}
@@ -1113,7 +1116,7 @@ TRACE_EVENT(walt_update_history,
		__entry->samples        = samples;
		__entry->evt            = evt;
		__entry->demand         = p->ravg.demand;
		__entry->walt_avg = (__entry->demand << 10) / walt_ravg_window,
		walt_util(__entry->walt_avg,__entry->demand);
		__entry->pelt_avg	= p->se.avg.util_avg;
		memcpy(__entry->hist, p->ravg.sum_history,
					RAVG_HIST_SIZE_MAX * sizeof(u32));
+9 −4
Original line number Diff line number Diff line
@@ -1753,6 +1753,13 @@ extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int walt_ravg_window;
extern bool walt_disabled;

#ifdef CONFIG_SCHED_WALT
/*
 * walt_util() - scale a WALT demand/runnable sum into a utilisation value.
 *
 * Shifts @demand_sum up by SCHED_CAPACITY_SHIFT and divides by the WALT
 * window length, storing the result in @util_var cast to its own type.
 * do_div() is used (rather than a plain 64-bit '/') so the 64-by-32
 * division also builds cleanly on 32-bit arm, which has no native
 * 64-bit divide.
 *
 * @demand_sum is parenthesized so expression arguments bind correctly
 * against '<<', and the body is wrapped in do { } while (0) so the macro
 * behaves as a single statement after an unbraced if/else.
 */
#define walt_util(util_var, demand_sum) do {\
	u64 sum = (demand_sum) << SCHED_CAPACITY_SHIFT;\
	do_div(sum, walt_ravg_window);\
	(util_var) = (typeof(util_var))sum;\
	} while (0)
#endif
/*
 * cpu_util returns the amount of capacity of a CPU that is used by CFS
 * tasks. The unit of the return value must be the one of capacity so we can
@@ -1786,8 +1793,7 @@ static inline unsigned long __cpu_util(int cpu, int delta)

#ifdef CONFIG_SCHED_WALT
	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
		util = cpu_rq(cpu)->cumulative_runnable_avg << SCHED_CAPACITY_SHIFT;
		util = div_u64(util, walt_ravg_window);
		walt_util(util, cpu_rq(cpu)->cumulative_runnable_avg);
	}
#endif
	delta += util;
@@ -1809,8 +1815,7 @@ static inline unsigned long cpu_util_freq(int cpu)

#ifdef CONFIG_SCHED_WALT
	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
		util = cpu_rq(cpu)->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
		do_div(util, walt_ravg_window);
		walt_util(util, cpu_rq(cpu)->prev_runnable_sum);
	}
#endif
	return (util >= capacity) ? capacity : util;