Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dcc0e189 authored by Lingutla Chandrasekhar
Browse files

sched: core: Use sched_clusters for updown migration handler



Currently, the sched updown migration handler derives the cluster topology
from the arch topology, but this cluster information is already populated
in the WALT sched_cluster structures. So reuse that instead of deriving it again.

Also, move the updown tunables support under CONFIG_SCHED_WALT.

Change-Id: Iddf4d18ddf75cc20637281d9889f671f42369513
Signed-off-by: Lingutla Chandrasekhar <clingutla@codeaurora.org>
parent d6a3e2d3
Loading
Loading
Loading
Loading
+4 −5
Original line number Diff line number Diff line
@@ -31,9 +31,9 @@ extern unsigned int sysctl_sched_sync_hint_enable;
extern unsigned int sysctl_sched_cstate_aware;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
extern unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_user_hint;
extern const int sched_user_hint_max;
extern unsigned int sysctl_sched_cpu_high_irqload;
@@ -70,6 +70,9 @@ sched_ravg_window_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp,
			 loff_t *ppos);

extern int sched_updown_migrate_handler(struct ctl_table *table,
					int write, void __user *buffer,
					size_t *lenp, loff_t *ppos);
#endif

#if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_DEBUG_PREEMPT)
@@ -131,10 +134,6 @@ extern int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

extern int sched_updown_migrate_handler(struct ctl_table *table,
					int write, void __user *buffer,
					size_t *lenp, loff_t *ppos);

extern int sysctl_numa_balancing(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos);
+0 −149
Original line number Diff line number Diff line
@@ -6875,155 +6875,6 @@ void ia64_set_curr_task(int cpu, struct task_struct *p)
{
	cpu_curr(cpu) = p;
}

#endif

#ifdef CONFIG_PROC_SYSCTL
/*
 * Count the clusters in the system by hopping across the possible CPUs
 * via their sibling masks, then report the number of up/down migration
 * margin levels, which is one fewer than the cluster count.
 */
static int find_capacity_margin_levels(void)
{
	int nr_clusters = 0;
	int cpu = 0;

	while (cpu < num_possible_cpus()) {
		/* Jump past every CPU sharing this CPU's cluster. */
		cpu += cpumask_weight(topology_possible_sibling_cpumask(cpu));
		nr_clusters++;
	}

	/* Margin levels sit between adjacent clusters: clusters - 1. */
	return nr_clusters - 1;
}

/*
 * Fan the per-level sysctl_sched_capacity_margin_up[] tunables out to
 * the per-CPU sched_capacity_margin_up[] array, one level per cluster.
 */
static void sched_update_up_migrate_values(int cap_margin_levels,
				const struct cpumask *cluster_cpus[])
{
	int level, cpu;

	if (cap_margin_levels <= 1) {
		/* A single level: every CPU takes the first tunable. */
		for_each_possible_cpu(cpu)
			sched_capacity_margin_up[cpu] =
				sysctl_sched_capacity_margin_up[0];
		return;
	}

	/*
	 * More than 2 clusters in the system: the last cluster needs no
	 * upmigrate value, so only the first cap_margin_levels clusters
	 * are walked.
	 */
	for (level = 0; level < cap_margin_levels; level++) {
		if (!cluster_cpus[level])
			continue;
		for_each_cpu(cpu, cluster_cpus[level])
			sched_capacity_margin_up[cpu] =
				sysctl_sched_capacity_margin_up[level];
	}
}

/*
 * Fan the per-level sysctl_sched_capacity_margin_down[] tunables out to
 * the per-CPU sched_capacity_margin_down[] array, one level per cluster.
 */
static void sched_update_down_migrate_values(int cap_margin_levels,
				const struct cpumask *cluster_cpus[])
{
	int level, cpu;

	if (cap_margin_levels <= 1) {
		/* A single level: every CPU takes the first tunable. */
		for_each_possible_cpu(cpu)
			sched_capacity_margin_down[cpu] =
				sysctl_sched_capacity_margin_down[0];
		return;
	}

	/*
	 * Skip the last cluster: there is no downmigration into it, so
	 * it needs no down migrate value.
	 */
	for (level = 0; level < cap_margin_levels; level++) {
		if (!cluster_cpus[level])
			continue;
		for_each_cpu(cpu, cluster_cpus[level])
			sched_capacity_margin_down[cpu] =
				sysctl_sched_capacity_margin_down[level];
	}
}

/*
 * Rebuild the cluster -> cpumask table from the arch topology and push
 * the just-written sysctl tunables out to the per-CPU margin arrays.
 * @data points at the sysctl array that was written; its identity alone
 * decides whether the up or the down margins are refreshed.
 */
static void sched_update_updown_migrate_values(unsigned int *data,
					      int cap_margin_levels)
{
	int i, cpu;
	/*
	 * static: entries past the number of clusters found this call keep
	 * whatever a previous call left there (NULL before the first call);
	 * the helpers only index up to cap_margin_levels and NULL-check.
	 */
	static const struct cpumask *cluster_cpus[MAX_CLUSTERS];

	/* One entry per cluster; cpu jumps by each cluster's weight. */
	for (i = cpu = 0; i < MAX_CLUSTERS &&
				cpu < num_possible_cpus(); i++) {
		cluster_cpus[i] = topology_possible_sibling_cpumask(cpu);
		cpu += cpumask_weight(topology_possible_sibling_cpumask(cpu));
	}

	/* Which table was written selects which margins to update. */
	if (data == &sysctl_sched_capacity_margin_up[0])
		sched_update_up_migrate_values(cap_margin_levels, cluster_cpus);
	else
		sched_update_down_migrate_values(cap_margin_levels,
						 cluster_cpus);
}

/*
 * sysctl handler for sched_upmigrate / sched_downmigrate.
 *
 * Lazily derives the number of margin levels (clusters - 1) and sizes
 * table->maxlen to match before delegating to proc_douintvec_capacity().
 * On a write, the previous values are cached so they can be restored if
 * the write fails or leaves an upmigrate margin above its corresponding
 * downmigrate margin at any level.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int sched_updown_migrate_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret, i;
	unsigned int *data = (unsigned int *)table->data;
	unsigned int *old_val;
	static DEFINE_MUTEX(mutex);
	static int cap_margin_levels = -1;

	mutex_lock(&mutex);

	/* Compute the level count once, or whenever maxlen is stale. */
	if (cap_margin_levels == -1 ||
		table->maxlen != (sizeof(unsigned int) * cap_margin_levels)) {
		cap_margin_levels = find_capacity_margin_levels();
		table->maxlen = sizeof(unsigned int) * cap_margin_levels;
	}

	/* A single-cluster system has no up/down migration boundary. */
	if (cap_margin_levels <= 0) {
		ret = -EINVAL;
		goto unlock_mutex;
	}

	if (!write) {
		ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);
		goto unlock_mutex;
	}

	/*
	 * Cache the old values so that they can be restored
	 * if either the write fails (for example out of range values)
	 * or the downmigrate and upmigrate are not in sync.
	 *
	 * kmemdup() replaces the previous kzalloc() + memcpy() pair: the
	 * zero-fill was redundant since the buffer is overwritten in full.
	 */
	old_val = kmemdup(data, table->maxlen, GFP_KERNEL);
	if (!old_val) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);

	if (ret) {
		memcpy(data, old_val, table->maxlen);
		goto free_old_val;
	}

	/* Upmigrate must never exceed downmigrate at any level. */
	for (i = 0; i < cap_margin_levels; i++) {
		if (sysctl_sched_capacity_margin_up[i] >
				sysctl_sched_capacity_margin_down[i]) {
			memcpy(data, old_val, table->maxlen);
			ret = -EINVAL;
			goto free_old_val;
		}
	}

	sched_update_updown_migrate_values(data, cap_margin_levels);

free_old_val:
	kfree(old_val);
unlock_mutex:
	mutex_unlock(&mutex);

	return ret;
}
#endif

#ifdef CONFIG_CGROUP_SCHED
+0 −6
Original line number Diff line number Diff line
@@ -165,12 +165,6 @@ unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
 * (default: ~20%)
 */
unsigned int capacity_margin				= 1280;

/* Migration margins */
unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS] = {
			[0 ... MAX_MARGIN_LEVELS-1] = 1078}; /* ~5% margin */
unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS] = {
			[0 ... MAX_MARGIN_LEVELS-1] = 1205}; /* ~15% margin */
unsigned int sched_capacity_margin_up[NR_CPUS] = {
			[0 ... NR_CPUS-1] = 1078}; /* ~5% margin */
unsigned int sched_capacity_margin_down[NR_CPUS] = {
+106 −0
Original line number Diff line number Diff line
@@ -3722,3 +3722,109 @@ void sched_set_refresh_rate(enum fps fps)
	}
}
EXPORT_SYMBOL(sched_set_refresh_rate);

/* Migration margins */
/*
 * Per-level up/down migration margin tunables, one entry per cluster
 * boundary (MAX_MARGIN_LEVELS).  The inline comments suggest fixed-point
 * values over 1024 (1078/1024 ~= 1.05) -- NOTE(review): confirm the
 * scale against the consumers of sched_capacity_margin_up/down[].
 */
unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS] = {
			[0 ... MAX_MARGIN_LEVELS-1] = 1078}; /* ~5% margin */
unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS] = {
			[0 ... MAX_MARGIN_LEVELS-1] = 1205}; /* ~15% margin */

#ifdef CONFIG_PROC_SYSCTL
/*
 * Fan the per-level migration tunables out to the per-CPU margin arrays
 * using the WALT sched_cluster list.  @up selects whether the upmigrate
 * or the downmigrate values are refreshed.
 */
static void sched_update_updown_migrate_values(bool up)
{
	struct sched_cluster *cluster;
	int cpu, level = 0;
	int cap_margin_levels = num_sched_clusters - 1;
	unsigned int *tunable = up ? sysctl_sched_capacity_margin_up :
				     sysctl_sched_capacity_margin_down;
	unsigned int *margin = up ? sched_capacity_margin_up :
				    sched_capacity_margin_down;

	if (cap_margin_levels <= 1) {
		/* At most two clusters: the first tunable covers all CPUs. */
		for_each_possible_cpu(cpu)
			margin[cpu] = tunable[0];
		return;
	}

	/*
	 * More than 2 clusters in the system: walk only the first
	 * cap_margin_levels clusters, since the CPUs in the last
	 * cluster need no margin value.
	 */
	for_each_sched_cluster(cluster) {
		for_each_cpu(cpu, &cluster->cpus)
			margin[cpu] = tunable[level];

		if (++level >= cap_margin_levels)
			break;
	}
}

/*
 * sysctl handler for sched_upmigrate / sched_downmigrate (WALT variant).
 *
 * Derives the number of margin levels from num_sched_clusters
 * (clusters - 1) and sizes table->maxlen to match before delegating to
 * proc_douintvec_capacity().  On a write, the previous values are cached
 * with kmemdup() so they can be restored if the write fails or leaves an
 * upmigrate margin above its downmigrate margin at any level.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int sched_updown_migrate_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret, i;
	unsigned int *data = (unsigned int *)table->data;
	unsigned int *old_val;
	static DEFINE_MUTEX(mutex);
	int cap_margin_levels = num_sched_clusters ? num_sched_clusters - 1 : 0;

	/* A single cluster (or none populated yet) has no boundary. */
	if (cap_margin_levels <= 0)
		return -EINVAL;

	mutex_lock(&mutex);

	/* Resize maxlen so the proc parser reads/writes every level. */
	if (table->maxlen != (sizeof(unsigned int) * cap_margin_levels))
		table->maxlen = sizeof(unsigned int) * cap_margin_levels;

	if (!write) {
		ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);
		goto unlock_mutex;
	}

	/*
	 * Cache the old values so that they can be restored
	 * if either the write fails (for example out of range values)
	 * or the downmigrate and upmigrate are not in sync.
	 */
	old_val = kmemdup(data, table->maxlen, GFP_KERNEL);
	if (!old_val) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);

	if (ret) {
		/* Parse/range failure: roll the table back untouched. */
		memcpy(data, old_val, table->maxlen);
		goto free_old_val;
	}

	/* Upmigrate must never exceed downmigrate at any level. */
	for (i = 0; i < cap_margin_levels; i++) {
		if (sysctl_sched_capacity_margin_up[i] >
				sysctl_sched_capacity_margin_down[i]) {
			memcpy(data, old_val, table->maxlen);
			ret = -EINVAL;
			goto free_old_val;
		}
	}

	/* The written table's identity selects up vs down refresh. */
	sched_update_updown_migrate_values(data ==
					&sysctl_sched_capacity_margin_up[0]);

free_old_val:
	kfree(old_val);
unlock_mutex:
	mutex_unlock(&mutex);

	return ret;
}
#endif
+0 −2
Original line number Diff line number Diff line
@@ -527,8 +527,6 @@ static struct ctl_table kern_table[] = {
		.mode		= 0644,
		.proc_handler	= sched_ravg_window_handler,
	},
#endif
#ifdef CONFIG_SMP
	{
		.procname	= "sched_upmigrate",
		.data		= &sysctl_sched_capacity_margin_up,