Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c5db4b9c authored by Pavankumar Kondeti, committed by Satya Durga Srinivasu Prabhala
Browse files

sched: Handle partial write failures in sched_updown_migrate_handler



The sched_upmigrate/sched_downmigrate tunable is set by passing
two values on a tri-cluster system. proc_douintvec_capacity() can
return an error when any of the user-specified values is out of range.
However, this may result in a partial update to the sysctl data when
the 1st value is within range but the 2nd value is not. Since an error
is returned to the tunable write system call, discard the partial
update to the tunable.

Before this patch,

> cat /proc/sys/kernel/sched_upmigrate
> 95 95
> echo 99 0 > /proc/sys/kernel/sched_upmigrate
> 99 95

After this patch,

> cat /proc/sys/kernel/sched_upmigrate
> 95 95
> echo 99 0 > /proc/sys/kernel/sched_upmigrate
> 95 95

This patch also refactors sched_updown_migrate_handler() to allocate
the temporary buffer only for write case.

Change-Id: I3c6b7eb12c4f38823022fa6420f1d9c3c8d05796
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 913365b6
Loading
Loading
Loading
Loading
+29 −21
Original line number Original line Diff line number Diff line
@@ -6997,8 +6997,8 @@ static void sched_update_down_migrate_values(int cap_margin_levels,
	}
	}
}
}


static int sched_update_updown_migrate_values(unsigned int *data,
static void sched_update_updown_migrate_values(unsigned int *data,
					int cap_margin_levels, int ret)
					      int cap_margin_levels)
{
{
	int i, cpu;
	int i, cpu;
	static const struct cpumask *cluster_cpus[MAX_CLUSTERS];
	static const struct cpumask *cluster_cpus[MAX_CLUSTERS];
@@ -7010,15 +7010,10 @@ static int sched_update_updown_migrate_values(unsigned int *data,
	}
	}


	if (data == &sysctl_sched_capacity_margin_up[0])
	if (data == &sysctl_sched_capacity_margin_up[0])
		sched_update_up_migrate_values(cap_margin_levels,
		sched_update_up_migrate_values(cap_margin_levels, cluster_cpus);
							cluster_cpus);
	else
	else if (data == &sysctl_sched_capacity_margin_down[0])
		sched_update_down_migrate_values(cap_margin_levels,
		sched_update_down_migrate_values(cap_margin_levels,
						 cluster_cpus);
						 cluster_cpus);
	else
		ret = -EINVAL;

	return ret;
}
}


int sched_updown_migrate_handler(struct ctl_table *table, int write,
int sched_updown_migrate_handler(struct ctl_table *table, int write,
@@ -7044,6 +7039,16 @@ int sched_updown_migrate_handler(struct ctl_table *table, int write,
		goto unlock_mutex;
		goto unlock_mutex;
	}
	}


	if (!write) {
		ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);
		goto unlock_mutex;
	}

	/*
	 * Cache the old values so that they can be restored
	 * if either the write fails (for example out of range values)
	 * or the downmigrate and upmigrate are not in sync.
	 */
	old_val = kzalloc(table->maxlen, GFP_KERNEL);
	old_val = kzalloc(table->maxlen, GFP_KERNEL);
	if (!old_val) {
	if (!old_val) {
		ret = -ENOMEM;
		ret = -ENOMEM;
@@ -7054,7 +7059,11 @@ int sched_updown_migrate_handler(struct ctl_table *table, int write,


	ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);
	ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);


	if (!ret && write) {
	if (ret) {
		memcpy(data, old_val, table->maxlen);
		goto free_old_val;
	}

	for (i = 0; i < cap_margin_levels; i++) {
	for (i = 0; i < cap_margin_levels; i++) {
		if (sysctl_sched_capacity_margin_up[i] >
		if (sysctl_sched_capacity_margin_up[i] >
				sysctl_sched_capacity_margin_down[i]) {
				sysctl_sched_capacity_margin_down[i]) {
@@ -7064,9 +7073,8 @@ int sched_updown_migrate_handler(struct ctl_table *table, int write,
		}
		}
	}
	}


		ret = sched_update_updown_migrate_values(data,
	sched_update_updown_migrate_values(data, cap_margin_levels);
						cap_margin_levels, ret);

	}
free_old_val:
free_old_val:
	kfree(old_val);
	kfree(old_val);
unlock_mutex:
unlock_mutex: