
Commit bb5b6b3e authored by Vikram Mulukutla, committed by Satya Durga Srinivasu Prabhala

sched: Add snapshot of Window Assisted Load Tracking (WALT)



This snapshot is taken from msm-4.9 as of commit 935c3e96d14c14d
(Revert "sched/fair: Limit sync wakeup bias to waker cpu").

Change-Id: I53b79289b37b7a352ae2a04b03cdadb3f2e564c6
Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>
[satyap@codeaurora.org:
1. Resolve merge conflicts
2. Fix indentation issues
3. Fix compilation issues for ARCH=um & SDM855
4. Comment out parts of code for compilation
5. Remove stubs introduced by commit: d8e3774e
(cpufreq: Various changes to allow sugov to compile)]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent b4695fbf
+76 −14
@@ -175,6 +175,12 @@ enum task_event {
 	IRQ_UPDATE	= 5,
 };
 
+/* Note: this need to be in sync with migrate_type_names array */
+enum migrate_types {
+	GROUP_TO_RQ,
+	RQ_TO_GROUP,
+};
+
 extern cpumask_var_t			cpu_isolated_map;
 
 extern void scheduler_tick(void);
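
The comment above the enum points at a migrate_type_names array that lives elsewhere in the WALT code and is not part of this diff. A minimal sketch of that companion-array pattern, assuming the array simply mirrors the enum names (designated initializers keep each string pinned to its enum value even if entries are reordered):

/* Illustrative only: the real migrate_type_names is defined in the
 * WALT/trace code, not in this commit. Each entry must line up with
 * its enum migrate_types value, which [GROUP_TO_RQ] = ... enforces. */
static const char *migrate_type_names[] = {
	[GROUP_TO_RQ] = "GROUP_TO_RQ",	/* busy time moves from group to runqueue */
	[RQ_TO_GROUP] = "RQ_TO_GROUP",	/* busy time moves from runqueue to group */
};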
@@ -419,8 +425,35 @@ struct sched_entity {
 #endif
 };
 
+struct sched_load {
+	unsigned long prev_load;
+	unsigned long new_task_load;
+	unsigned long predicted_load;
+};
+
+struct cpu_cycle_counter_cb {
+	u64 (*get_cpu_cycle_counter)(int cpu);
+};
+
+#define MAX_NUM_CGROUP_COLOC_ID	20
+
+extern DEFINE_PER_CPU_READ_MOSTLY(int, sched_load_boost);
+
 #ifdef CONFIG_SCHED_WALT
+extern void sched_exit(struct task_struct *p);
+extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
+extern void sched_set_io_is_busy(int val);
+extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
+extern unsigned int sched_get_group_id(struct task_struct *p);
+extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
+extern u32 sched_get_init_task_load(struct task_struct *p);
+extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin,
+					  u32 fmax);
+extern int sched_set_boost(int enable);
+extern void free_task_load_ptrs(struct task_struct *p);
+
 #define RAVG_HIST_SIZE_MAX  5
+#define NUM_BUSY_BUCKETS 10
 
 /* ravg represents frequency scaled cpu-demand of tasks */
 struct ravg {
@@ -440,19 +473,49 @@ struct ravg {
 	 * sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
 	 * demand for tasks.
 	 *
-	 * 'curr_window' represents task's contribution to cpu busy time
-	 * statistics (rq->curr_runnable_sum) in current window
+	 * 'curr_window_cpu' represents task's contribution to cpu busy time on
+	 * various CPUs in the current window
+	 *
+	 * 'prev_window_cpu' represents task's contribution to cpu busy time on
+	 * various CPUs in the previous window
+	 *
+	 * 'curr_window' represents the sum of all entries in curr_window_cpu
+	 *
+	 * 'prev_window' represents the sum of all entries in prev_window_cpu
+	 *
-	 * 'prev_window' represents task's contribution to cpu busy time
-	 * statistics (rq->prev_runnable_sum) in previous window
+	 * 'pred_demand' represents task's current predicted cpu busy time
+	 *
+	 * 'busy_buckets' groups historical busy time into different buckets
+	 * used for prediction
 	 */
 	u64 mark_start;
 	u32 sum, demand;
+	u32 coloc_demand;
 	u32 sum_history[RAVG_HIST_SIZE_MAX];
+	u32 *curr_window_cpu, *prev_window_cpu;
 	u32 curr_window, prev_window;
 	u16 active_windows;
 	u32 pred_demand;
+	u8 busy_buckets[NUM_BUSY_BUCKETS];
 };
-#endif
+#else
+static inline void sched_exit(struct task_struct *p) { }
+static inline int
+register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
+{
+	return 0;
+}
+static inline void sched_set_io_is_busy(int val) {};
+
+static inline int sched_set_boost(int enable)
+{
+	return -EINVAL;
+}
+static inline void free_task_load_ptrs(struct task_struct *p) { }
+
+static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
+					u32 fmin, u32 fmax) { }
+#endif /* CONFIG_SCHED_WALT */
 
 struct sched_rt_entity {
 	struct list_head		run_list;
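
The struct ravg comment says 'demand' is derived from the busy time of the most recent sysctl_sched_ravg_hist_size windows kept in sum_history. As a rough illustration (not code from this commit; the helper name is invented and the real logic lives in kernel/sched/walt.c), one policy WALT supports takes the larger of the recent average and the most recent window, so demand reacts quickly to bursts without discarding history:

#include <stdint.h>

/* Sketch only: turn the sum_history ring into a single demand figure,
 * approximating WALT's "max of recent average and most recent window"
 * policy. Assumes n > 0 and hist[0] is the latest completed window. */
static uint32_t demand_from_history(const uint32_t *hist, unsigned int n)
{
	uint64_t sum = 0;
	uint32_t most_recent = hist[0];
	unsigned int i;

	for (i = 0; i < n; i++)
		sum += hist[i];

	uint64_t avg = sum / n;
	return (uint32_t)(avg > most_recent ? avg : most_recent);
}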
@@ -606,6 +669,7 @@ struct task_struct {
 	const struct sched_class	*sched_class;
 	struct sched_entity		se;
 	struct sched_rt_entity		rt;
+	u64 last_sleep_ts;
 #ifdef CONFIG_SCHED_WALT
 	struct ravg ravg;
 	/*
@@ -613,7 +677,13 @@ struct task_struct {
 	 * of this task
 	 */
 	u32 init_load_pct;
-	u64 last_sleep_ts;
+	u64 last_wake_ts;
+	u64 last_switch_out_ts;
+	u64 last_cpu_selected_ts;
+	struct related_thread_group *grp;
+	struct list_head grp_list;
+	u64 cpu_cycles;
+	bool misfit;
 #endif
 
 #ifdef CONFIG_CGROUP_SCHED
@@ -1169,11 +1239,6 @@ struct task_struct {
 	 */
 };
 
-static inline int sched_set_boost(int enable)
-{
-	return 0;
-}
-
 static inline struct pid *task_pid(struct task_struct *task)
 {
 	return task->pids[PIDTYPE_PID].pid;
@@ -1723,9 +1788,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
-#define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)
-#define SCHED_CPUFREQ_PL (1U << 5)
-
 #ifndef TASK_SIZE_OF
 #define TASK_SIZE_OF(tsk)	TASK_SIZE
 #endif
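
Per the task_struct comment, init_load_pct is the initial task load assigned to a newly forked task, settable through sched_set_init_task_load() declared above. Assuming the usual 20 ms default WALT window (an assumption, as is the helper name), seeding a child's first demand from that percentage looks roughly like this:

#include <stdint.h>

#define ASSUMED_WALT_WINDOW_NS 20000000ULL	/* 20 ms window; assumed default */

/* Hypothetical sketch: a forked task has no sum_history yet, so WALT
 * seeds its demand as init_load_pct percent of one full window. */
static uint64_t seed_initial_demand(int init_load_pct)
{
	return (ASSUMED_WALT_WINDOW_NS * (uint64_t)init_load_pct) / 100;
}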
+27 −0
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CORE_CTL_H
+#define __CORE_CTL_H
+
+#ifdef CONFIG_SCHED_CORE_CTL
+void core_ctl_check(u64 wallclock);
+int core_ctl_set_boost(bool boost);
+#else
+static inline void core_ctl_check(u64 wallclock) {}
+static inline int core_ctl_set_boost(bool boost)
+{
+	return 0;
+}
+#endif
+#endif
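
core_ctl_set_boost() lets other kernel code vote to keep more cores online, and the stub returns 0 when CONFIG_SCHED_CORE_CTL is off. A minimal usage sketch, assuming a hypothetical caller and relying only on the return contract declared above (pairing each boost with an unboost is the expected convention, though this header does not enforce it):

/* Hypothetical caller: boost core_ctl around a latency-critical burst. */
static int example_run_burst(void (*burst_work)(void))
{
	int ret = core_ctl_set_boost(true);

	if (ret)
		return ret;	/* no boost vote taken; nothing to undo */

	burst_work();		/* ... latency-critical work ... */

	core_ctl_set_boost(false);	/* drop our boost vote */
	return 0;
}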
+4 −0
@@ -11,6 +11,10 @@
 #define SCHED_CPUFREQ_RT	(1U << 0)
 #define SCHED_CPUFREQ_DL	(1U << 1)
 #define SCHED_CPUFREQ_IOWAIT	(1U << 2)
+#define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)
+#define SCHED_CPUFREQ_WALT (1U << 4)
+#define SCHED_CPUFREQ_PL	(1U << 5)
+#define SCHED_CPUFREQ_EARLY_DET (1U << 6)
 
 #define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
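
These flags travel in the bitmask that scheduler hooks pass to a cpufreq governor's update callback; the four new bits let WALT signal inter-cluster migration, WALT-derived utilization, predicted-load changes, and early detection of a heavy task. A sketch of the consuming side's bitmask idiom (the function is invented for illustration; the real handling lives in the governor and differs in detail):

/* Illustrative only: WALT-originated events that might justify
 * re-running frequency selection immediately rather than waiting
 * for the governor's normal rate limit. */
static int example_walt_wants_update(unsigned int flags)
{
	return !!(flags & (SCHED_CPUFREQ_INTERCLUSTER_MIG |
			   SCHED_CPUFREQ_PL |
			   SCHED_CPUFREQ_EARLY_DET));
}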

+14 −0
@@ -22,6 +22,20 @@ extern unsigned long nr_iowait(void);
 extern unsigned long nr_iowait_cpu(int cpu);
 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
 
+#ifdef CONFIG_SMP
+extern unsigned int sched_get_cpu_util(int cpu);
+extern u64 sched_get_cpu_last_busy_time(int cpu);
+#else
+static inline unsigned int sched_get_cpu_util(int cpu)
+{
+	return 0;
+}
+static inline u64 sched_get_cpu_last_busy_time(int cpu)
+{
+	return 0;
+}
+#endif
+
 static inline int sched_info_on(void)
 {
 #ifdef CONFIG_SCHEDSTATS
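
These two accessors expose per-CPU WALT statistics to the rest of the kernel, with zero-returning fallbacks on !SMP builds. A hedged sketch of a consumer; the caller, the quiet-time threshold, and the treatment of a zero timestamp are all assumptions for illustration:

/* Hypothetical consumer, e.g. a hotplug or thermal policy asking
 * whether a CPU has been idle long enough to be a good candidate
 * for isolation. Assumes 0 means "never seen busy". */
static int example_cpu_is_quiet(int cpu, u64 now, u64 quiet_ns)
{
	u64 last_busy = sched_get_cpu_last_busy_time(cpu);

	return last_busy && (now - last_busy) >= quiet_ns;
}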
+10 −1
@@ -29,7 +29,16 @@ extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_use_walt_cpu_util;
 extern unsigned int sysctl_sched_use_walt_task_util;
 extern unsigned int sysctl_sched_walt_init_task_load_pct;
-extern unsigned int sysctl_sched_walt_cpu_high_irqload;
+extern unsigned int sysctl_sched_cpu_high_irqload;
+extern unsigned int sysctl_sched_boost;
+extern unsigned int sysctl_sched_group_upmigrate_pct;
+extern unsigned int sysctl_sched_group_downmigrate_pct;
+
+extern int
+walt_proc_update_handler(struct ctl_table *table, int write,
+			 void __user *buffer, size_t *lenp,
+			 loff_t *ppos);
+
 #endif

enum sched_tunable_scaling {
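
walt_proc_update_handler() is a proc handler for the WALT sysctls declared above, so a table entry in kernel/sysctl.c would wire a tunable to it roughly as follows. This is a sketch: the procname and mode are assumptions, and the actual entry in this tree may differ.

/* Sketch of a ctl_table entry routing a WALT tunable through the
 * new handler rather than the generic proc_dointvec helpers. */
static struct ctl_table example_walt_ctl = {
	.procname	= "sched_group_upmigrate",
	.data		= &sysctl_sched_group_upmigrate_pct,
	.maxlen		= sizeof(unsigned int),
	.mode		= 0644,
	.proc_handler	= walt_proc_update_handler,
};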
Loading