Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 708b0099 authored by Satya Durga Srinivasu Prabhala
Browse files

sched: Add snapshot of sched_{up,down}migrate knobs



This snapshot is taken from msm-4.14 as of commit 40bd7878710ab4f
("sched: Don't fail isolation request for an already isolated CPU").

Change-Id: I9a52e67862ee5086880884128102aa4f8efb04ce
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent 29595937
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -20,12 +20,18 @@ extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
enum { sysctl_hung_task_timeout_secs = 0 };
#endif

#define MAX_CLUSTERS 3
/* MAX_MARGIN_LEVELS should be one less than MAX_CLUSTERS */
#define MAX_MARGIN_LEVELS (MAX_CLUSTERS - 1)

extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_sync_hint_enable;
extern unsigned int sysctl_sched_cstate_aware;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
extern unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
@@ -98,6 +104,10 @@ extern int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

extern int sched_updown_migrate_handler(struct ctl_table *table,
					int write, void __user *buffer,
					size_t *lenp, loff_t *ppos);

extern int sysctl_numa_balancing(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos);
+3 −0
Original line number Diff line number Diff line
@@ -63,6 +63,9 @@ extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
				      void __user *, size_t *, loff_t *);
extern int proc_do_large_bitmap(struct ctl_table *, int,
				void __user *, size_t *, loff_t *);
extern int proc_douintvec_capacity(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos);

/*
 * Register a set of sysctl names by calling register_sysctl_table
+139 −0
Original line number Diff line number Diff line
@@ -6800,6 +6800,145 @@ void sched_move_task(struct task_struct *tsk)
	task_rq_unlock(rq, tsk, &rf);
}

#ifdef CONFIG_PROC_SYSCTL
/*
 * Count the capacity-margin levels present on this system.
 *
 * Walks the CPU space one topology core mask at a time; each mask
 * covers one cluster, so counting the hops counts the clusters.
 * Margin levels sit between adjacent clusters, hence one fewer than
 * the number of clusters (0 on a single-cluster system).
 */
static int find_capacity_margin_levels(void)
{
	int nr_clusters = 0;
	int cpu = 0;

	while (cpu < num_possible_cpus()) {
		/* Jump past every CPU that shares this CPU's cluster. */
		cpu += cpumask_weight(topology_core_cpumask(cpu));
		nr_clusters++;
	}

	return nr_clusters - 1;
}

/*
 * Fan the per-level sysctl up-migrate margins out to the per-CPU
 * sched_capacity_margin_up[] array.  Level i applies to every CPU in
 * cluster i.  With only one margin level (two clusters), the single
 * value is applied to all possible CPUs instead.
 *
 * Caller (the sysctl handler) holds the handler mutex, serializing
 * updates to the per-CPU array.
 */
static void sched_update_up_migrate_values(int cap_margin_levels,
				const struct cpumask *cluster_cpus[])
{
	int i, cpu;

	if (cap_margin_levels > 1) {
		/*
		 * No need to worry about CPUs in last cluster
		 * if there are more than 2 clusters in the system
		 */
		for (i = 0; i < cap_margin_levels; i++)
			if (cluster_cpus[i])
				for_each_cpu(cpu, cluster_cpus[i])
					sched_capacity_margin_up[cpu] =
					sysctl_sched_capacity_margin_up[i];
	} else {
		for_each_possible_cpu(cpu)
			sched_capacity_margin_up[cpu] =
				sysctl_sched_capacity_margin_up[0];
	}
}

/*
 * Fan the per-level sysctl down-migrate margins out to the per-CPU
 * sched_capacity_margin_down[] array.  Level i applies to every CPU
 * in cluster i+1 (the lowest cluster has nowhere to down-migrate
 * from, so it is skipped).  With only one margin level (two clusters)
 * the single value is applied to all possible CPUs instead.
 *
 * Caller (the sysctl handler) holds the handler mutex, serializing
 * updates to the per-CPU array.
 */
static void sched_update_down_migrate_values(int cap_margin_levels,
				const struct cpumask *cluster_cpus[])
{
	int i, cpu;

	if (cap_margin_levels > 1) {
		/*
		 * Skip first cluster as down migrate value isn't needed
		 */
		for (i = 0; i < cap_margin_levels; i++)
			if (cluster_cpus[i+1])
				for_each_cpu(cpu, cluster_cpus[i+1])
					sched_capacity_margin_down[cpu] =
					sysctl_sched_capacity_margin_down[i];
	} else {
		for_each_possible_cpu(cpu)
			sched_capacity_margin_down[cpu] =
				sysctl_sched_capacity_margin_down[0];
	}
}

/*
 * Propagate freshly-written sysctl margin values to the per-CPU
 * arrays.  @data points at the sysctl array that was just updated and
 * selects whether up- or down-migrate values are fanned out.
 *
 * cluster_cpus[] is a function-static cache of one core cpumask per
 * cluster: the population loop stops as soon as slot 0 is non-NULL,
 * so it only does work on the first call.  The caller (the sysctl
 * handler) holds its mutex, which serializes access to this static
 * state.
 *
 * Returns @ret unchanged on success, or -EINVAL when @data matches
 * neither known sysctl array.
 */
static int sched_update_updown_migrate_values(unsigned int *data,
					int cap_margin_levels, int ret)
{
	int i, cpu;
	static const struct cpumask *cluster_cpus[MAX_CLUSTERS];

	/* First call only: record the core cpumask of each cluster. */
	for (i = cpu = 0; (!cluster_cpus[i]) &&
				cpu < num_possible_cpus(); i++) {
		cluster_cpus[i] = topology_core_cpumask(cpu);
		cpu += cpumask_weight(topology_core_cpumask(cpu));
	}

	if (data == &sysctl_sched_capacity_margin_up[0])
		sched_update_up_migrate_values(cap_margin_levels,
							cluster_cpus);
	else if (data == &sysctl_sched_capacity_margin_down[0])
		sched_update_down_migrate_values(cap_margin_levels,
							cluster_cpus);
	else
		ret = -EINVAL;

	return ret;
}

/*
 * proc handler for the sched_{up,down}migrate sysctl knobs.
 *
 * Reads behave like proc_douintvec_capacity().  Writes are validated
 * (up margin must not exceed the down margin at every level) and, on
 * success, fanned out to the per-CPU margin arrays; an invalid write
 * is rolled back from a snapshot and returns -EINVAL.
 *
 * The static mutex serializes concurrent readers/writers and guards
 * the static state in the helpers this calls.
 */
int sched_updown_migrate_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret, i;
	unsigned int *data = (unsigned int *)table->data;
	unsigned int *old_val;
	static DEFINE_MUTEX(mutex);
	static int cap_margin_levels = -1;

	mutex_lock(&mutex);

	/*
	 * Lazily size table->maxlen to the number of margin levels the
	 * topology actually has (first call, or if maxlen was changed
	 * elsewhere), so the vector helper parses the right count.
	 */
	if (cap_margin_levels == -1 ||
		table->maxlen != (sizeof(unsigned int) * cap_margin_levels)) {
		cap_margin_levels = find_capacity_margin_levels();
		table->maxlen = sizeof(unsigned int) * cap_margin_levels;
	}

	/* Single-cluster systems have no margin levels to tune. */
	if (cap_margin_levels <= 0) {
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/* Snapshot current values so a rejected write can be rolled back. */
	old_val = kzalloc(table->maxlen, GFP_KERNEL);
	if (!old_val) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	memcpy(old_val, data, table->maxlen);

	ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);

	if (!ret && write) {
		/* Reject any level where up margin exceeds down margin. */
		for (i = 0; i < cap_margin_levels; i++) {
			if (sysctl_sched_capacity_margin_up[i] >
					sysctl_sched_capacity_margin_down[i]) {
				/* Restore the pre-write values. */
				memcpy(data, old_val, table->maxlen);
				ret = -EINVAL;
				goto free_old_val;
			}
		}

		/* Apply the accepted values to the per-CPU arrays. */
		ret = sched_update_updown_migrate_values(data,
						cap_margin_levels, ret);
	}
free_old_val:
	kfree(old_val);
unlock_mutex:
	mutex_unlock(&mutex);

	return ret;
}
#endif

/* Map a cgroup subsystem state back to its owning task_group (NULL-safe). */
static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct task_group, css) : NULL;
+23 −4
Original line number Diff line number Diff line
@@ -171,6 +171,16 @@ unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
 */
unsigned int capacity_margin				= 1280;

/* Migration margins */
/*
 * The sysctl_* arrays hold one value per margin level (i.e. per
 * cluster boundary) as written through the sched_{up,down}migrate
 * knobs; sched_updown_migrate_handler() fans them out into the
 * per-CPU sched_capacity_margin_* arrays below, which is what the
 * scheduler consults.  Margins are fixed point against 1024:
 * 1078/1024 ~= +5%, 1205/1024 ~= +15%.
 */
unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS] = {
			[0 ... MAX_MARGIN_LEVELS-1] = 1078}; /* ~5% margin */
unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS] = {
			[0 ... MAX_MARGIN_LEVELS-1] = 1205}; /* ~15% margin */
unsigned int sched_capacity_margin_up[NR_CPUS] = {
			[0 ... NR_CPUS-1] = 1078}; /* ~5% margin */
unsigned int sched_capacity_margin_down[NR_CPUS] = {
			[0 ... NR_CPUS-1] = 1205}; /* ~15% margin */

#ifdef CONFIG_SCHED_WALT
/* 1ms default for 20ms window size scaled to 1024 */
unsigned int sysctl_sched_min_task_util_for_boost = 51;
@@ -3802,9 +3812,18 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
	trace_sched_util_est_task(p, &p->se.avg);
}

static inline int task_fits_capacity(struct task_struct *p, long capacity)
static inline int task_fits_capacity(struct task_struct *p,
					long capacity,
					int cpu)
{
	return capacity * 1024 > task_util_est(p) * capacity_margin;
	unsigned int margin;

	if (capacity_orig_of(task_cpu(p)) > capacity_orig_of(cpu))
		margin = sysctl_sched_capacity_margin_down[task_cpu(p)];
	else
		margin = sysctl_sched_capacity_margin_up[task_cpu(p)];

	return capacity * 1024 > task_util_est(p) * margin;
}

static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
@@ -3817,7 +3836,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
		return;
	}

	if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
	if (task_fits_capacity(p, capacity_of(cpu_of(rq)), cpu_of(rq))) {
		rq->misfit_task_load = 0;
		return;
	}
@@ -6888,7 +6907,7 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
	/* Bring task utilization in sync with prev_cpu */
	sync_entity_load_avg(&p->se);

	return !task_fits_capacity(p, min_cap);
	return !task_fits_capacity(p, min_cap, cpu);
}

/*
+2 −0
Original line number Diff line number Diff line
@@ -87,6 +87,8 @@ struct rq;
struct cpuidle_state;

extern __read_mostly bool sched_predl;
extern unsigned int sched_capacity_margin_up[NR_CPUS];
extern unsigned int sched_capacity_margin_down[NR_CPUS];

#ifdef CONFIG_SCHED_WALT
extern unsigned int sched_ravg_window;
Loading