Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 307d1bd6 authored by Lingutla Chandrasekhar's avatar Lingutla Chandrasekhar Committed by Gerrit - the friendly Code Review server
Browse files

sched: walt: Use sched_avg for WALT only



The sched_avg stats are enabled for SMP, but they are used only by WALT.
So move sched_avg under CONFIG_SCHED_WALT.

Change-Id: I19b558833e12b80f02fea27c9a9fc8b7630d8689
Signed-off-by: default avatarLingutla Chandrasekhar <clingutla@codeaurora.org>
parent c01fcea3
Loading
Loading
Loading
Loading
+3 −7
Original line number Original line Diff line number Diff line
@@ -22,9 +22,11 @@ extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);


#ifdef CONFIG_SMP
#ifdef CONFIG_SCHED_WALT
extern void sched_update_nr_prod(int cpu, long delta, bool inc);
extern void sched_update_nr_prod(int cpu, long delta, bool inc);
extern unsigned int sched_get_cpu_util(int cpu);
extern unsigned int sched_get_cpu_util(int cpu);
extern void sched_update_hyst_times(void);
extern u64 sched_lpm_disallowed_time(int cpu);
#else
#else
static inline void sched_update_nr_prod(int cpu, long delta, bool inc)
static inline void sched_update_nr_prod(int cpu, long delta, bool inc)
{
{
@@ -33,12 +35,6 @@ static inline unsigned int sched_get_cpu_util(int cpu)
{
{
	return 0;
	return 0;
}
}
#endif

#ifdef CONFIG_SCHED_WALT
extern void sched_update_hyst_times(void);
extern u64 sched_lpm_disallowed_time(int cpu);
#else
static inline void sched_update_hyst_times(void)
static inline void sched_update_hyst_times(void)
{
{
}
}
+2 −2
Original line number Original line Diff line number Diff line
@@ -19,8 +19,8 @@ endif
obj-y += core.o loadavg.o clock.o cputime.o
obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle.o fair.o rt.o deadline.o
obj-y += idle.o fair.o rt.o deadline.o
obj-y += wait.o wait_bit.o swait.o completion.o
obj-y += wait.o wait_bit.o swait.o completion.o
obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o sched_avg.o
obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
obj-$(CONFIG_SCHED_WALT) += walt.o boost.o
obj-$(CONFIG_SCHED_WALT) += walt.o boost.o sched_avg.o
obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o
+1 −13
Original line number Original line Diff line number Diff line
@@ -25,14 +25,12 @@ static DEFINE_PER_CPU(u64, nr_max);
static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
static s64 last_get_time;
static s64 last_get_time;


#ifdef CONFIG_SCHED_WALT
unsigned int sysctl_sched_busy_hyst_enable_cpus;
unsigned int sysctl_sched_busy_hyst_enable_cpus;
unsigned int sysctl_sched_busy_hyst;
unsigned int sysctl_sched_busy_hyst;
unsigned int sysctl_sched_coloc_busy_hyst_enable_cpus = 112;
unsigned int sysctl_sched_coloc_busy_hyst_enable_cpus = 112;
unsigned int sysctl_sched_coloc_busy_hyst = 39000000;
unsigned int sysctl_sched_coloc_busy_hyst = 39000000;
unsigned int sysctl_sched_coloc_busy_hyst_max_ms = 5000;
unsigned int sysctl_sched_coloc_busy_hyst_max_ms = 5000;
static DEFINE_PER_CPU(atomic64_t, busy_hyst_end_time) = ATOMIC64_INIT(0);
static DEFINE_PER_CPU(atomic64_t, busy_hyst_end_time) = ATOMIC64_INIT(0);
#endif
static DEFINE_PER_CPU(u64, hyst_time);
static DEFINE_PER_CPU(u64, hyst_time);


#define NR_THRESHOLD_PCT		15
#define NR_THRESHOLD_PCT		15
@@ -114,7 +112,6 @@ void sched_get_nr_running_avg(struct sched_avg_stats *stats)
}
}
EXPORT_SYMBOL(sched_get_nr_running_avg);
EXPORT_SYMBOL(sched_get_nr_running_avg);


#ifdef CONFIG_SCHED_WALT
void sched_update_hyst_times(void)
void sched_update_hyst_times(void)
{
{
	u64 std_time, rtgb_time;
	u64 std_time, rtgb_time;
@@ -156,12 +153,6 @@ static inline void update_busy_hyst_end_time(int cpu, bool dequeue,
		atomic64_set(&per_cpu(busy_hyst_end_time, cpu),
		atomic64_set(&per_cpu(busy_hyst_end_time, cpu),
				curr_time + per_cpu(hyst_time, cpu));
				curr_time + per_cpu(hyst_time, cpu));
}
}
#else
static inline void update_busy_hyst_end_time(int cpu, bool dequeue,
				unsigned long prev_nr_run, u64 curr_time)
{
}
#endif


/**
/**
 * sched_update_nr_prod
 * sched_update_nr_prod
@@ -215,10 +206,9 @@ unsigned int sched_get_cpu_util(int cpu)
	util = rq->cfs.avg.util_avg;
	util = rq->cfs.avg.util_avg;
	capacity = capacity_orig_of(cpu);
	capacity = capacity_orig_of(cpu);


#ifdef CONFIG_SCHED_WALT
	util = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
	util = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
	util = div64_u64(util, sched_ravg_window >> SCHED_CAPACITY_SHIFT);
	util = div64_u64(util, sched_ravg_window >> SCHED_CAPACITY_SHIFT);
#endif

	raw_spin_unlock_irqrestore(&rq->lock, flags);
	raw_spin_unlock_irqrestore(&rq->lock, flags);


	util = (util >= capacity) ? capacity : util;
	util = (util >= capacity) ? capacity : util;
@@ -226,7 +216,6 @@ unsigned int sched_get_cpu_util(int cpu)
	return busy;
	return busy;
}
}


#ifdef CONFIG_SCHED_WALT
u64 sched_lpm_disallowed_time(int cpu)
u64 sched_lpm_disallowed_time(int cpu)
{
{
	u64 now = sched_clock();
	u64 now = sched_clock();
@@ -237,4 +226,3 @@ u64 sched_lpm_disallowed_time(int cpu)


	return 0;
	return 0;
}
}
#endif