Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c98b8a18 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched/fair: Update overutilized status after migrating tasks"

parents cd67c993 4cb51ad4
Loading
Loading
Loading
Loading
+29 −21
Original line number Diff line number Diff line
@@ -6997,8 +6997,8 @@ static void sched_update_down_migrate_values(int cap_margin_levels,
	}
}

static int sched_update_updown_migrate_values(unsigned int *data,
					int cap_margin_levels, int ret)
static void sched_update_updown_migrate_values(unsigned int *data,
					      int cap_margin_levels)
{
	int i, cpu;
	static const struct cpumask *cluster_cpus[MAX_CLUSTERS];
@@ -7010,15 +7010,10 @@ static int sched_update_updown_migrate_values(unsigned int *data,
	}

	if (data == &sysctl_sched_capacity_margin_up[0])
		sched_update_up_migrate_values(cap_margin_levels,
							cluster_cpus);
	else if (data == &sysctl_sched_capacity_margin_down[0])
		sched_update_up_migrate_values(cap_margin_levels, cluster_cpus);
	else
		sched_update_down_migrate_values(cap_margin_levels,
						 cluster_cpus);
	else
		ret = -EINVAL;

	return ret;
}

int sched_updown_migrate_handler(struct ctl_table *table, int write,
@@ -7044,6 +7039,16 @@ int sched_updown_migrate_handler(struct ctl_table *table, int write,
		goto unlock_mutex;
	}

	if (!write) {
		ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);
		goto unlock_mutex;
	}

	/*
	 * Cache the old values so that they can be restored
	 * if either the write fails (for example out of range values)
	 * or the downmigrate and upmigrate are not in sync.
	 */
	old_val = kzalloc(table->maxlen, GFP_KERNEL);
	if (!old_val) {
		ret = -ENOMEM;
@@ -7054,7 +7059,11 @@ int sched_updown_migrate_handler(struct ctl_table *table, int write,

	ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);

	if (!ret && write) {
	if (ret) {
		memcpy(data, old_val, table->maxlen);
		goto free_old_val;
	}

	for (i = 0; i < cap_margin_levels; i++) {
		if (sysctl_sched_capacity_margin_up[i] >
				sysctl_sched_capacity_margin_down[i]) {
@@ -7064,9 +7073,8 @@ int sched_updown_migrate_handler(struct ctl_table *table, int write,
		}
	}

		ret = sched_update_updown_migrate_values(data,
						cap_margin_levels, ret);
	}
	sched_update_updown_migrate_values(data, cap_margin_levels);

free_old_val:
	kfree(old_val);
unlock_mutex:
+8 −0
Original line number Diff line number Diff line
@@ -8578,6 +8578,7 @@ static void attach_one_task(struct rq *rq, struct task_struct *p)
	rq_lock(rq, &rf);
	update_rq_clock(rq);
	attach_task(rq, p);
	update_overutilized_status(rq);
	rq_unlock(rq, &rf);
}

@@ -8601,6 +8602,13 @@ static void attach_tasks(struct lb_env *env)
		attach_task(env->dst_rq, p);
	}

	/*
	 * The enqueue_task_fair only updates the overutilized status
	 * for the waking tasks. Since multiple tasks may get migrated
	 * from load balancer, instead of doing it there, update the
	 * overutilized status here at the end.
	 */
	update_overutilized_status(env->dst_rq);
	rq_unlock(env->dst_rq, &rf);
}

+5 −1
Original line number Diff line number Diff line
@@ -3395,7 +3395,11 @@ static int do_proc_douintvec_capacity_conv(bool *negp, unsigned long *lvalp,
					   int *valp, int write, void *data)
{
	if (write) {
		if (*negp || *lvalp == 0)
		/*
		 * The sched_upmigrate/sched_downmigrate tunables are
		 * accepted in percentage. Limit them to 100.
		 */
		if (*negp || *lvalp == 0 || *lvalp > 100)
			return -EINVAL;
		*valp = SCHED_FIXEDPOINT_SCALE * 100 / *lvalp;
	} else {