Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 64b577b9 authored by Satya Durga Srinivasu Prabhala
Browse files

sched: Add snapshot of Window Assisted Load Tracking (WALT)



This snapshot is taken from msm-4.19 as of commit 5debecbe7195
("trace: filter out spurious preemption and IRQs disable traces").

Change-Id: I8fab4084971baadcaa037f40ab549fc073a4b1ea
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent 6df025a0
Loading
Loading
Loading
Loading
+59 −0
Original line number Diff line number Diff line
@@ -183,9 +183,65 @@ static struct attribute_group crash_note_cpu_attr_group = {
};
#endif

#ifdef CONFIG_SCHED_WALT
/*
 * Show the per-CPU scheduler load boost percentage for this CPU's
 * sysfs device node.  The PAGE_SIZE-2 bound is preserved from the
 * original snapshot — presumably to leave slack at the end of the
 * sysfs page; confirm against the WALT source before changing.
 */
static ssize_t sched_load_boost_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return scnprintf(buf, PAGE_SIZE - 2, "%d\n",
			 per_cpu(sched_load_boost, cpu->dev.id));
}

/*
 * Store handler for /sys/devices/system/cpu/cpuN/sched_load_boost.
 *
 * Parses a signed integer from @buf and records it in the per-CPU
 * sched_load_boost variable.  Returns @count on success, -EINVAL if
 * the value is out of range, or the kstrtoint() error otherwise.
 */
static ssize_t __ref sched_load_boost_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int err;
	int boost;
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int cpuid = cpu->dev.id;

	/*
	 * NOTE(review): strstrip() writes into the buffer through a cast
	 * that discards const.  Sysfs store buffers are page-backed and
	 * writable in practice, so this works, but verify against kernfs
	 * before reusing the pattern elsewhere.
	 */
	err = kstrtoint(strstrip((char *)buf), 0, &boost);
	if (err)
		return err;

	/*
	 * -100 is low enough to cancel out CPU's load and make it near zero.
	 * 1000 is close to the maximum value that cpu_util_freq_{walt,pelt}
	 * can take without overflow.
	 */
	if (boost < -100 || boost > 1000)
		return -EINVAL;

	per_cpu(sched_load_boost, cpuid) = boost;

	return count;
}

/* Creates dev_attr_sched_load_boost wired to the show/store pair above. */
static DEVICE_ATTR_RW(sched_load_boost);

/* WALT attributes exported under each CPU's sysfs device directory. */
static struct attribute *sched_cpu_attrs[] = {
	&dev_attr_sched_load_boost.attr,
	NULL
};

static struct attribute_group sched_cpu_attr_group = {
	.attrs = sched_cpu_attrs,
};
#endif

/* Attribute groups registered on every CPU device. */
static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
#ifdef CONFIG_SCHED_WALT
	&sched_cpu_attr_group,
#endif
	NULL
};
@@ -193,6 +249,9 @@ static const struct attribute_group *common_cpu_attr_groups[] = {
/*
 * Attribute groups for hotpluggable CPU devices — currently identical to
 * common_cpu_attr_groups; kept separate to mirror upstream structure.
 */
static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
#ifdef CONFIG_SCHED_WALT
	&sched_cpu_attr_group,
#endif
	NULL
};
+57 −0
Original line number Diff line number Diff line
@@ -1459,6 +1459,56 @@ static const struct file_operations proc_pid_sched_operations = {

#endif

/*
 * Print out various scheduling related per-task fields:
 */

#ifdef CONFIG_SCHED_WALT
/*
 * The WALT implementation lives in a separate translation unit; these
 * handlers are declared __weak so this file links even when that unit
 * is absent.  NOTE(review): if the strong definitions are missing at
 * runtime these function pointers are NULL — confirm the WALT objects
 * are always built when CONFIG_SCHED_WALT is set.
 */
extern int __weak sched_wake_up_idle_show(struct seq_file *m, void *v);
extern ssize_t __weak sched_wake_up_idle_write(struct file *file,
		const char __user *buf, size_t count, loff_t *offset);
extern int __weak sched_wake_up_idle_open(struct inode *inode,
						struct file *filp);

/* /proc/<pid>/sched_wake_up_idle: per-task idle-CPU wakeup preference. */
static const struct file_operations proc_pid_sched_wake_up_idle_operations = {
	.open		= sched_wake_up_idle_open,
	.read		= seq_read,
	.write		= sched_wake_up_idle_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

extern int __weak sched_init_task_load_show(struct seq_file *m, void *v);
extern ssize_t __weak
sched_init_task_load_write(struct file *file, const char __user *buf,
					size_t count, loff_t *offset);
extern int __weak
sched_init_task_load_open(struct inode *inode, struct file *filp);

/* /proc/<pid>/sched_init_task_load: initial task load percentage. */
static const struct file_operations proc_pid_sched_init_task_load_operations = {
	.open		= sched_init_task_load_open,
	.read		= seq_read,
	.write		= sched_init_task_load_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

extern int __weak sched_group_id_show(struct seq_file *m, void *v);
extern ssize_t __weak
sched_group_id_write(struct file *file, const char __user *buf,
					size_t count, loff_t *offset);
extern int __weak sched_group_id_open(struct inode *inode, struct file *filp);

/* /proc/<pid>/sched_group_id: related-thread-group membership. */
static const struct file_operations proc_pid_sched_group_id_operations = {
	.open		= sched_group_id_open,
	.read		= seq_read,
	.write		= sched_group_id_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif	/* CONFIG_SCHED_WALT */

#ifdef CONFIG_SCHED_AUTOGROUP
/*
 * Print out autogroup related information:
@@ -3011,6 +3061,13 @@ static const struct pid_entry tgid_base_stuff[] = {
	ONE("status",     S_IRUGO, proc_pid_status),
	ONE("personality", S_IRUSR, proc_pid_personality),
	ONE("limits",	  S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_WALT
	REG("sched_wake_up_idle", 00644,
				proc_pid_sched_wake_up_idle_operations),
	REG("sched_init_task_load", 00644,
				proc_pid_sched_init_task_load_operations),
	/*
	 * NOTE(review): 00666 makes sched_group_id world-writable — any
	 * user can change any task's group id.  Confirm this is intended
	 * (it matches the msm-4.19 snapshot) rather than tightening to 0644.
	 */
	REG("sched_group_id", 00666, proc_pid_sched_group_id_operations),
#endif
#ifdef CONFIG_SCHED_DEBUG
	REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
+3 −0
Original line number Diff line number Diff line
@@ -69,6 +69,9 @@ enum cpuhp_state {
	CPUHP_SLAB_PREPARE,
	CPUHP_MD_RAID5_PREPARE,
	CPUHP_RCUTREE_PREP,
#ifdef CONFIG_SCHED_WALT
	/*
	 * Teardown state for WALT core control CPU isolation.  Position in
	 * this enum fixes hotplug callback ordering — do not move casually.
	 */
	CPUHP_CORE_CTL_ISOLATION_DEAD,
#endif
	CPUHP_CPUIDLE_COUPLED_PREPARE,
	CPUHP_POWERPC_PMAC_PREPARE,
	CPUHP_POWERPC_MMU_CTX_PREPARE,
+157 −0
Original line number Diff line number Diff line
@@ -117,6 +117,18 @@ struct task_group;
					 (task->flags & PF_FROZEN) == 0 && \
					 (task->state & TASK_NOLOAD) == 0)

/*
 * Enum for display driver to provide varying refresh rates
 * (frames per second) to the scheduler via sched_set_refresh_rate().
 * FPS0 presumably denotes "display off/unknown" — confirm with the
 * display driver that consumes this.
 */
enum fps {
	FPS0 = 0,
	FPS30 = 30,
	FPS48 = 48,
	FPS60 = 60,
	FPS90 = 90,
	FPS120 = 120,
};

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

/*
@@ -212,6 +224,21 @@ struct task_group;
/* Task command name length: */
#define TASK_COMM_LEN			16

/*
 * Scheduler events on which WALT updates a task's window statistics.
 * NOTE(review): names mirror their apparent call sites (put_prev_task,
 * pick_next_task, wakeup, migration, periodic update, IRQ accounting);
 * confirm against the WALT implementation, which is not in this file.
 */
enum task_event {
	PUT_PREV_TASK   = 0,
	PICK_NEXT_TASK  = 1,
	TASK_WAKE       = 2,
	TASK_MIGRATE    = 3,
	TASK_UPDATE     = 4,
	IRQ_UPDATE      = 5,
};

/* Note: this needs to be in sync with the migrate_type_names array */
enum migrate_types {
	GROUP_TO_RQ,	/* demand moves from a thread group to a runqueue */
	RQ_TO_GROUP,	/* demand moves from a runqueue to a thread group */
};

extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX
@@ -478,6 +505,89 @@ struct sched_entity {
#endif
};

/*
 * Callback registered via register_cpu_cycle_counter_cb() so the
 * scheduler can read a per-CPU cycle counter.
 */
struct cpu_cycle_counter_cb {
	u64 (*get_cpu_cycle_counter)(int cpu);
};

/* Per-CPU load boost percentage; written via the sched_load_boost sysfs attribute. */
DECLARE_PER_CPU_READ_MOSTLY(int, sched_load_boost);

#ifdef CONFIG_SCHED_WALT
/*
 * WALT entry points, defined in a separate translation unit.  Declared
 * __weak so the kernel links when that unit is not built; see the
 * matching no-op stubs in the #else branch for !CONFIG_SCHED_WALT.
 */
extern void __weak sched_exit(struct task_struct *p);
extern int __weak
register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
extern void __weak
sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax);
extern void __weak free_task_load_ptrs(struct task_struct *p);
extern void __weak sched_set_refresh_rate(enum fps fps);

/* Maximum number of windows of demand history kept per task. */
#define RAVG_HIST_SIZE_MAX  5
/* Number of buckets used to classify historical busy time for prediction. */
#define NUM_BUSY_BUCKETS 10

/* ravg represents frequency scaled cpu-demand of tasks */
struct ravg {
	/*
	 * 'mark_start' marks the beginning of an event (task waking up, task
	 * starting to execute, task being preempted) within a window
	 *
	 * 'sum' represents how runnable a task has been within current
	 * window. It incorporates both running time and wait time and is
	 * frequency scaled.
	 *
	 * 'sum_history' keeps track of history of 'sum' seen over previous
	 * RAVG_HIST_SIZE windows. Windows where task was entirely sleeping are
	 * ignored.
	 *
	 * 'demand' represents maximum sum seen over previous
	 * sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
	 * demand for tasks.
	 *
	 * 'coloc_demand' — demand variant used for colocation decisions;
	 * NOTE(review): exact averaging rule not visible here, confirm
	 * against the WALT implementation.
	 *
	 * 'curr_window_cpu' represents task's contribution to cpu busy time on
	 * various CPUs in the current window
	 *
	 * 'prev_window_cpu' represents task's contribution to cpu busy time on
	 * various CPUs in the previous window
	 *
	 * 'curr_window' represents the sum of all entries in curr_window_cpu
	 *
	 * 'prev_window' represents the sum of all entries in prev_window_cpu
	 *
	 * 'pred_demand' represents task's current predicted cpu busy time
	 *
	 * 'busy_buckets' groups historical busy time into different buckets
	 * used for prediction
	 *
	 * 'demand_scaled' represents task's demand scaled to 1024
	 *
	 * 'pred_demand_scaled' — presumably 'pred_demand' scaled to 1024,
	 * mirroring 'demand_scaled'; confirm in the WALT source.
	 *
	 * 'active_time' and 'last_win_size' — accounting of task active time
	 * and the size of the last window seen; semantics not visible in
	 * this header — TODO confirm against walt.c.
	 */
	u64 mark_start;
	u32 sum, demand;
	u32 coloc_demand;
	u32 sum_history[RAVG_HIST_SIZE_MAX];
	u32 *curr_window_cpu, *prev_window_cpu;
	u32 curr_window, prev_window;
	u32 pred_demand;
	u8 busy_buckets[NUM_BUSY_BUCKETS];
	u16 demand_scaled;
	u16 pred_demand_scaled;
	u64 active_time;
	u64 last_win_size;
};
#else
/* !CONFIG_SCHED_WALT: no-op stubs so callers need not be conditional. */
static inline void sched_exit(struct task_struct *p) { }

static inline int
register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
{
	return 0;
}

static inline void free_task_load_ptrs(struct task_struct *p) { }

static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
					u32 fmin, u32 fmax) { }

static inline void sched_set_refresh_rate(enum fps fps) { }
#endif /* CONFIG_SCHED_WALT */

struct sched_rt_entity {
	struct list_head		run_list;
	unsigned long			timeout;
@@ -675,6 +785,20 @@ struct task_struct {
	const struct sched_class	*sched_class;
	struct sched_entity		se;
	struct sched_rt_entity		rt;
#ifdef CONFIG_SCHED_WALT
	/* WALT per-task state; maintained by the WALT implementation. */
	u64 last_sleep_ts;		/* timestamp of last sleep — TODO confirm units (ns?) */
	bool wake_up_idle;		/* prefer idle CPUs at wakeup; see sched_set_wake_up_idle() */
	struct ravg ravg;		/* windowed demand statistics */
	u32 init_load_pct;		/* initial task load percentage (see /proc sched_init_task_load) */
	u64 last_wake_ts;
	u64 last_enqueued_ts;
	struct related_thread_group *grp;	/* colocation group, or NULL */
	struct list_head grp_list;		/* membership in grp's task list */
	u64 cpu_cycles;			/* presumably last cycle-counter reading — confirm in walt.c */
	bool misfit;
	u8 unfilter;
#endif

#ifdef CONFIG_CGROUP_SCHED
	struct task_group		*sched_task_group;
#endif
@@ -2000,4 +2124,37 @@ int sched_trace_rq_cpu(struct rq *rq);

const struct cpumask *sched_trace_rd_span(struct root_domain *rd);

#ifdef CONFIG_SCHED_WALT
#define PF_WAKE_UP_IDLE	1
/* Return whether @p prefers to be woken on an idle CPU. */
static inline u32 sched_get_wake_up_idle(struct task_struct *p)
{
	return p->wake_up_idle;
}

/* Record @p's idle-CPU wakeup preference; always succeeds. */
static inline int sched_set_wake_up_idle(struct task_struct *p,
						int enable)
{
	p->wake_up_idle = (enable != 0);
	return 0;
}

/* Convenience wrapper operating on the current task. */
static inline void set_wake_up_idle(bool enabled)
{
	current->wake_up_idle = enabled;
}
#else
/* !CONFIG_SCHED_WALT: preference is never stored and reads as 0. */
static inline u32 sched_get_wake_up_idle(struct task_struct *p)
{
	return 0;
}

static inline int sched_set_wake_up_idle(struct task_struct *p,
						int enable)
{
	return 0;
}

static inline void set_wake_up_idle(bool enabled) {}
#endif

#endif
+31 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016, 2019, The Linux Foundation. All rights reserved.
 */

#ifndef __CORE_CTL_H
#define __CORE_CTL_H

#define MAX_CPUS_PER_CLUSTER 6
#define MAX_CLUSTERS 3

/*
 * Snapshot of core-control state delivered to notifier subscribers.
 * Field semantics (big-CPU count, colocation load %, per-cluster
 * top-app utilization % and current capacity %) are inferred from the
 * names — confirm against the core_ctl implementation.
 */
struct core_ctl_notif_data {
	unsigned int nr_big;
	unsigned int coloc_load_pct;
	unsigned int ta_util_pct[MAX_CLUSTERS];
	unsigned int cur_cap_pct[MAX_CLUSTERS];
};

#ifdef CONFIG_SCHED_WALT
/* __weak: resolved by the core_ctl unit when built, NULL otherwise. */
extern int __weak core_ctl_set_boost(bool boost);
extern void __weak core_ctl_notifier_register(struct notifier_block *n);
extern void __weak core_ctl_notifier_unregister(struct notifier_block *n);
#else
/* !CONFIG_SCHED_WALT: no-op stubs so callers need not be conditional. */
static inline int core_ctl_set_boost(bool boost)
{
	return 0;
}
static inline void core_ctl_notifier_register(struct notifier_block *n) {}
static inline void core_ctl_notifier_unregister(struct notifier_block *n) {}
#endif
#endif
Loading