Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d5547841 authored by qctecmdr Service, committed by Gerrit — the friendly Code Review server
Browse files

Merge "sched/fair: Update overutilized status after migrating tasks"

parents 238f231a 758bcfa2
Loading
Loading
Loading
Loading
+29 −21
Original line number | Diff line number | Diff line
@@ -6902,8 +6902,8 @@ static void sched_update_down_migrate_values(int cap_margin_levels,
	}
}

static int sched_update_updown_migrate_values(unsigned int *data,
					int cap_margin_levels, int ret)
static void sched_update_updown_migrate_values(unsigned int *data,
					      int cap_margin_levels)
{
	int i, cpu;
	static const struct cpumask *cluster_cpus[MAX_CLUSTERS];
@@ -6915,15 +6915,10 @@ static int sched_update_updown_migrate_values(unsigned int *data,
	}

	if (data == &sysctl_sched_capacity_margin_up[0])
		sched_update_up_migrate_values(cap_margin_levels,
							cluster_cpus);
	else if (data == &sysctl_sched_capacity_margin_down[0])
		sched_update_up_migrate_values(cap_margin_levels, cluster_cpus);
	else
		sched_update_down_migrate_values(cap_margin_levels,
						 cluster_cpus);
	else
		ret = -EINVAL;

	return ret;
}

int sched_updown_migrate_handler(struct ctl_table *table, int write,
@@ -6949,6 +6944,16 @@ int sched_updown_migrate_handler(struct ctl_table *table, int write,
		goto unlock_mutex;
	}

	if (!write) {
		ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);
		goto unlock_mutex;
	}

	/*
	 * Cache the old values so that they can be restored
	 * if either the write fails (for example out of range values)
	 * or the downmigrate and upmigrate are not in sync.
	 */
	old_val = kzalloc(table->maxlen, GFP_KERNEL);
	if (!old_val) {
		ret = -ENOMEM;
@@ -6959,7 +6964,11 @@ int sched_updown_migrate_handler(struct ctl_table *table, int write,

	ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);

	if (!ret && write) {
	if (ret) {
		memcpy(data, old_val, table->maxlen);
		goto free_old_val;
	}

	for (i = 0; i < cap_margin_levels; i++) {
		if (sysctl_sched_capacity_margin_up[i] >
				sysctl_sched_capacity_margin_down[i]) {
@@ -6969,9 +6978,8 @@ int sched_updown_migrate_handler(struct ctl_table *table, int write,
		}
	}

		ret = sched_update_updown_migrate_values(data,
						cap_margin_levels, ret);
	}
	sched_update_updown_migrate_values(data, cap_margin_levels);

free_old_val:
	kfree(old_val);
unlock_mutex:
+8 −0
Original line number | Diff line number | Diff line
@@ -9314,6 +9314,7 @@ static void attach_one_task(struct rq *rq, struct task_struct *p)
	rq_lock(rq, &rf);
	update_rq_clock(rq);
	attach_task(rq, p);
	update_overutilized_status(rq);
	rq_unlock(rq, &rf);
}

@@ -9337,6 +9338,13 @@ static void attach_tasks(struct lb_env *env)
		attach_task(env->dst_rq, p);
	}

	/*
	 * The enqueue_task_fair only updates the overutilized status
	 * for the waking tasks. Since multiple tasks may get migrated
	 * from load balancer, instead of doing it there, update the
	 * overutilized status here at the end.
	 */
	update_overutilized_status(env->dst_rq);
	rq_unlock(env->dst_rq, &rf);
}