
Commit 3066bc4e authored by Pavankumar Kondeti

sched: move task_util() to sched.h



The same task_util() inline function is defined in fair.c and rt.c.
Move it to a common header file.

While at it, make sure that this function also works on 32 bit.
Previously,

util = ((u64)demand << 10) / sched_ravg_window

This division of two 64-bit quantities causes a linker error on
32-bit systems, because a plain '/' on 64-bit operands needs a
libgcc helper there. The same result can be achieved without
casting the demand to 64 bit, and without losing precision, by
making it

util = demand / (sched_ravg_window >> 10)

Change-Id: Iffc2b6b484fbcdac8d6f9739f96798a8d8e035c5
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent d98d9ee8
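
For illustration, here is a minimal standalone C sketch (not part of the original commit) of the arithmetic being changed. The variable names mirror the kernel fields, but the widths and sample values are assumptions inferred from the diff (a 32-bit demand and a 32-bit sched_ravg_window):

/*
 * Standalone sketch, not kernel code. Demonstrates why the old form
 * needs 64-bit division and how the new form stays in 32-bit math.
 */
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10

int main(void)
{
	uint32_t demand = 1500000;             /* hypothetical WALT demand */
	uint32_t sched_ravg_window = 20000000; /* hypothetical 20 ms window (ns) */

	/*
	 * Old form: demand is widened to 64 bits so that the << 10 cannot
	 * overflow. On a 32-bit kernel this 64-by-64 '/' requires a libgcc
	 * helper (__aeabi_uldivmod / __udivdi3), which is not linked into
	 * the kernel, hence the linker error the commit message mentions.
	 */
	uint64_t old_util = ((uint64_t)demand << SCHED_CAPACITY_SHIFT) /
			    sched_ravg_window;

	/* New form: shift the divisor instead; both operands stay 32-bit. */
	uint32_t new_util = demand /
			    (sched_ravg_window >> SCHED_CAPACITY_SHIFT);

	printf("old=%llu new=%u\n", (unsigned long long)old_util, new_util);
	return 0;
}

In kernel code the usual way to divide a 64-bit value on 32-bit is do_div()/div64_u64(); shifting the divisor instead keeps the division in native word size and avoids the helper altogether.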
+1 −5
@@ -24,13 +24,9 @@ extern unsigned int sysctl_sched_initial_task_util;
extern unsigned int sysctl_sched_cstate_aware;
extern unsigned int sysctl_sched_capacity_margin;
extern unsigned int sysctl_sched_capacity_margin_down;
#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int sysctl_sched_init_task_load_pct;
#endif

#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_init_task_load_pct;
extern unsigned int sysctl_sched_cpu_high_irqload;
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
+0 −12
@@ -5323,18 +5323,6 @@ static unsigned long __cpu_norm_util(int cpu, unsigned long capacity, int delta)
	return DIV_ROUND_UP(util << SCHED_CAPACITY_SHIFT, capacity);
}

static inline int task_util(struct task_struct *p)
{
#ifdef CONFIG_SCHED_WALT
	if (!walt_disabled && sysctl_sched_use_walt_task_util) {
		u64 demand = p->ravg.demand;

		return (demand << 10) / sched_ravg_window;
	}
#endif
	return p->se.avg.util_avg;
}

static inline bool
bias_to_waker_cpu(struct task_struct *p, int cpu, struct cpumask *rtg_target)
{
+0 −12
@@ -1713,18 +1713,6 @@ static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static inline unsigned long task_util(struct task_struct *p)
{
#ifdef CONFIG_SCHED_WALT
	if (!walt_disabled && sysctl_sched_use_walt_task_util) {
		u64 demand = p->ravg.demand;

		return (demand << 10) / sched_ravg_window;
	}
#endif
	return p->se.avg.util_avg;
}

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
+10 −1
@@ -1704,9 +1704,18 @@ static inline unsigned long capacity_orig_of(int cpu)
	return cpu_rq(cpu)->cpu_capacity_orig;
}

extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int walt_disabled;

static inline unsigned long task_util(struct task_struct *p)
{
#ifdef CONFIG_SCHED_WALT
	if (!walt_disabled && sysctl_sched_use_walt_task_util)
		return p->ravg.demand /
		       (sched_ravg_window >> SCHED_CAPACITY_SHIFT);
#endif
	return p->se.avg.util_avg;
}

/*
 * cpu_util returns the amount of capacity of a CPU that is used by CFS
 * tasks. The unit of the return value must be the one of capacity so we can