Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 25776104 authored by Syed Rameez Mustafa
Browse files

Merge dev/msm-4.9-sched into msm-4.9



  sched: WALT: increase WALT minimum window size to 20ms
  sched: start tracking placement algorithm runtime
  sched: EAS: place sync=1 wakee on waker's domain
  sched: fair: Don't pull task during LB if task doesn't fit
  sched: walt: add the update_task_ravg_mini trace event
  sched: EAS: kill incorrect nohz idle cpu kick
  sched: Fix inefficiencies in the load balancer.
  sched: EAS: allow newly idle load balancing selectively
  sched: EAS: bias towards prev CPU
  sched: EAS: improve task packing algorithm to avoid excessive packing
  sched: EAS: take into account of PF_WAKE_UP_IDLE
  sched: bias towards prev CPU when it's one of idle CPUs
  sched: select backup CPU with least cumulative window demand
  sched: WALT: don't bias towards prev CPU when ediff == 0
  sched: EAS: add trace point sched_group_energy
  sched: Update rq->cpu_capacity_orig on thermal (LMH) throttling
  sched: walt: Acquire rq-locks in update_min_max_capacity
  sched: use cum_window_demand for rt task placement
  sched: use cumulative window demand for task placement
  sched: WALT: account cumulative window demand
  sched: EAS: introduce irqload awareness
  sched: EAS: check if CPU is overutilized correctly
  sched: fix energy diff calculation when sync = 1
  sched: EAS: use busy energy cost when CPU is not in a C-state
  sched: EAS: fix incorrect energy delta calculation due to rounding error
  sched: EAS: introduce cstate aware task placement
  sched: introduce trace events for EAS task placement
  sched: EAS/WALT: take into account of waking task's load
  cpufreq: sched: WALT: don't apply capacity margin twice
  sched: EAS: decouple capacity margin
  sched/fair: kick active load balance for misfit task
  sched/fair: select busiest rq with misfit task
  sched/fair: fix for group_smaller_cpu_capacity()
  sched: Consider misfit tasks when load-balancing
  sched/fair: fix to set sgs->group_misfit_task
  sched/fair: correct task_fits_max() for misfit task
  sched: WALT: fix potential overflow
  sched: WALT: fix frequency invariant
  sched: EAS: schedfreq: fix CPU util over estimation
  sched: add minimum level of energy awareness to rt class
  sched: EAS: fix sync-wakeup task placement logic
  sched: EAS/WALT: finish accounting prior to task_tick
  cpufreq: sched: update capacity request upon tick always
  sched: EAS: avoid sync-wakup on an overutilized CPU
  sched: EAS: prevent excessive task packing
  cpufreq: sched: update CPU utilization upon CFS task dequeue
  sched: EAS: kick nohz idle CPU within the same sched domain
  sched: EAS/WALT: use cr_avg instead of prev_runnable_sum
  sched: EAS/WALT: fix deflated energy cost of previous CPU
  sched: EAS: upmigrate misfit current task more efficiently
  sched: set sysctl_sched_is_big_little = 1
  defconfig: msm: enable EAS (Energy Aware Scheduler)
  sched/walt: factor out WALT from HMP scheduler
  sched/fair: prevent meaningless active migration
  sched: WALT: introduce a struct with load information for schedutil

Change-Id: If5b44b61cb56a4525f8c9139c2b5e153f7dc9f8e
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
parents 17a15f82 e2562921
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -6,6 +6,7 @@ CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_SCHED_WALT=y
CONFIG_RCU_EXPERT=y
CONFIG_RCU_FAST_NO_HZ=y
CONFIG_IKCONFIG=y
@@ -16,14 +17,13 @@ CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_SCHED_HMP=y
CONFIG_SCHED_HMP_CSTATE_AWARE=y
CONFIG_SCHED_CORE_CTL=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SCHED_TUNE=y
CONFIG_DEFAULT_USE_ENERGY_AWARE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
+2 −2
Original line number Diff line number Diff line
@@ -5,6 +5,7 @@ CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_SCHED_WALT=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
@@ -20,14 +21,13 @@ CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_SCHED_HMP=y
CONFIG_SCHED_HMP_CSTATE_AWARE=y
CONFIG_SCHED_CORE_CTL=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SCHED_TUNE=y
CONFIG_DEFAULT_USE_ENERGY_AWARE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
+2 −2
Original line number Diff line number Diff line
@@ -1506,7 +1506,7 @@ static const struct file_operations proc_pid_sched_wake_up_idle_operations = {

#endif	/* CONFIG_SMP */

#ifdef CONFIG_SCHED_HMP
#ifdef CONFIG_SCHED_WALT

static int sched_init_task_load_show(struct seq_file *m, void *v)
{
@@ -3062,7 +3062,7 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_SMP
	REG("sched_wake_up_idle",      S_IRUGO|S_IWUSR, proc_pid_sched_wake_up_idle_operations),
#endif
#ifdef CONFIG_SCHED_HMP
#ifdef CONFIG_SCHED_WALT
	REG("sched_init_task_load",      S_IRUGO|S_IWUSR, proc_pid_sched_init_task_load_operations),
	REG("sched_group_id",      S_IRUGO|S_IWUGO, proc_pid_sched_group_id_operations),
#endif
+30 −21
Original line number Diff line number Diff line
@@ -1483,7 +1483,9 @@ struct ravg {
	u32 sum_history[RAVG_HIST_SIZE_MAX];
	u32 *curr_window_cpu, *prev_window_cpu;
	u32 curr_window, prev_window;
#ifdef CONFIG_SCHED_HMP
	u64 curr_burst, avg_burst, avg_sleep_time;
#endif
	u16 active_windows;
	u32 pred_demand;
	u8 busy_buckets[NUM_BUSY_BUCKETS];
@@ -1659,7 +1661,8 @@ struct task_struct {
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_SCHED_HMP
	u64 last_sleep_ts;
#ifdef CONFIG_SCHED_WALT
	struct ravg ravg;
	/*
	 * 'init_load_pct' represents the initial task load assigned to children
@@ -2635,7 +2638,6 @@ extern int sched_set_window(u64 window_start, unsigned int window_size);
extern unsigned long sched_get_busy(int cpu);
extern void sched_get_cpus_busy(struct sched_load *busy,
				const struct cpumask *query_cpus);
extern void sched_set_io_is_busy(int val);
extern int sched_set_boost(int enable);
extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
extern u32 sched_get_init_task_load(struct task_struct *p);
@@ -2652,25 +2654,12 @@ extern void sched_set_cpu_cstate(int cpu, int cstate,
			 int wakeup_energy, int wakeup_latency);
extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
				int wakeup_energy, int wakeup_latency);
extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
extern u64 sched_ktime_clock(void);
extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
extern unsigned int sched_get_group_id(struct task_struct *p);

#else /* CONFIG_SCHED_HMP */
static inline void free_task_load_ptrs(struct task_struct *p) { }

static inline u64 sched_ktime_clock(void)
{
	return 0;
}

static inline int
register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
{
	return 0;
}

static inline int sched_set_window(u64 window_start, unsigned int window_size)
{
	return -EINVAL;
@@ -2682,8 +2671,6 @@ static inline unsigned long sched_get_busy(int cpu)
static inline void sched_get_cpus_busy(struct sched_load *busy,
				       const struct cpumask *query_cpus) {};

static inline void sched_set_io_is_busy(int val) {};

static inline int sched_set_boost(int enable)
{
	return -EINVAL;
@@ -2694,9 +2681,6 @@ static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
	return 0;
}

static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
					u32 fmin, u32 fmax) { }

static inline void
sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
{
@@ -2708,6 +2692,31 @@ static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus,
}
#endif /* CONFIG_SCHED_HMP */

#ifdef CONFIG_SCHED_WALT
extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
extern void sched_set_io_is_busy(int val);
extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
extern unsigned int sched_get_group_id(struct task_struct *p);
extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
extern u32 sched_get_init_task_load(struct task_struct *p);
extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin,
					  u32 fmax);
#else
static inline int
register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
{
	return 0;
}
static inline void sched_set_io_is_busy(int val) {};
#endif /* CONFIG_SCHED_WALT */

#ifndef CONFIG_SCHED_WALT
#ifndef CONFIG_SCHED_HMP
static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
					u32 fmin, u32 fmax) { }
#endif /* CONFIG_SCHED_HMP */
#endif /* CONFIG_SCHED_WALT */

#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
@@ -2962,7 +2971,7 @@ extern void wake_up_new_task(struct task_struct *tsk);
#endif
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);
#ifdef CONFIG_SCHED_HMP
#ifdef CONFIG_SCHED_WALT
extern void sched_exit(struct task_struct *p);
#else
static inline void sched_exit(struct task_struct *p) { }
+7 −5
Original line number Diff line number Diff line
@@ -25,8 +25,13 @@ extern unsigned int sysctl_sched_cstate_aware;
#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int sysctl_sched_walt_init_task_load_pct;
extern unsigned int sysctl_sched_walt_cpu_high_irqload;
extern unsigned int sysctl_sched_init_task_load_pct;
#endif

#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_cpu_high_irqload;
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
#endif

#ifdef CONFIG_SCHED_HMP
@@ -43,8 +48,6 @@ extern int sysctl_sched_freq_dec_notify;
extern unsigned int sysctl_sched_freq_reporting_policy;
extern unsigned int sysctl_sched_window_stats_policy;
extern unsigned int sysctl_sched_ravg_hist_size;
extern unsigned int sysctl_sched_cpu_high_irqload;
extern unsigned int sysctl_sched_init_task_load_pct;
extern unsigned int sysctl_sched_spill_nr_run;
extern unsigned int sysctl_sched_spill_load_pct;
extern unsigned int sysctl_sched_upmigrate_pct;
@@ -57,7 +60,6 @@ extern unsigned int sysctl_sched_small_wakee_task_load_pct;
extern unsigned int sysctl_sched_big_waker_task_load_pct;
extern unsigned int sysctl_sched_select_prev_cpu_us;
extern unsigned int sysctl_sched_restrict_cluster_spill;
extern unsigned int sysctl_sched_new_task_windows;
extern unsigned int sysctl_sched_pred_alert_freq;
extern unsigned int sysctl_sched_freq_aggregate;
extern unsigned int sysctl_sched_enable_thread_grouping;
Loading