Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ecd0abab, authored by qctecmdr; committed by Gerrit (the friendly Code Review server)
Browse files

Merge "sched/fair: Add timeout for detach_tasks() in load balance"

parents 3f06319b a428455e
Loading
Loading
Loading
Loading
+13 −2
Original line number | Diff line number | Diff line
@@ -2092,6 +2092,8 @@ static int select_fallback_rq(int cpu, struct task_struct *p, bool allow_iso)
	enum { cpuset, possible, fail, bug } state = cpuset;
	int dest_cpu;
	int isolated_candidate = -1;
	int backup_cpu = -1;
	unsigned int max_nr = UINT_MAX;

	/*
	 * If the node that the CPU is on has been offlined, cpu_to_node()
@@ -2107,9 +2109,18 @@ static int select_fallback_rq(int cpu, struct task_struct *p, bool allow_iso)
				continue;
			if (cpu_isolated(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
			if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) {
				if (cpu_rq(dest_cpu)->nr_running < 32)
					return dest_cpu;
				if (cpu_rq(dest_cpu)->nr_running > max_nr)
					continue;
				backup_cpu = dest_cpu;
				max_nr = cpu_rq(dest_cpu)->nr_running;
			}
		}

		if (backup_cpu != -1)
			return backup_cpu;
	}

	for (;;) {
+32 −5
Original line number | Diff line number | Diff line
@@ -7765,8 +7765,27 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,

	/* Bail out if no candidate was found. */
	weight = cpumask_weight(candidates);
	if (!weight)
	if (!weight) {
		/*
		 * Don't overload the previous CPU if it had already
		 * more runnable tasks. Fallback to a CPU with lower
		 * number of tasks.
		 */
		if (cpu_rq(prev_cpu)->nr_running > 32) {
			int i;
			unsigned int best_nr = UINT_MAX;

			for_each_cpu(i, cpu_active_mask) {
				if (!cpumask_test_cpu(i, &p->cpus_allowed))
					continue;
				if (cpu_rq(i)->nr_running < best_nr) {
					best_nr = cpu_rq(i)->nr_running;
					best_energy_cpu = i;
				}
			}
		}
		goto unlock;
	}

	/* If there is only one sensible candidate, select it now. */
	cpu = cpumask_first(candidates);
@@ -8829,17 +8848,21 @@ static int detach_tasks(struct lb_env *env)
	unsigned long load = 0;
	int detached = 0;
	int orig_loop = env->loop;
	u64 start_t = rq_clock(env->src_rq);

	lockdep_assert_held(&env->src_rq->lock);

	if (env->imbalance <= 0)
		return 0;

	if (env->src_rq->nr_running < 32) {
		if (!same_cluster(env->dst_cpu, env->src_cpu))
			env->flags |= LBF_IGNORE_PREFERRED_CLUSTER_TASKS;

	if (capacity_orig_of(env->dst_cpu) < capacity_orig_of(env->src_cpu))
		if (capacity_orig_of(env->dst_cpu) <
				capacity_orig_of(env->src_cpu))
			env->flags |= LBF_IGNORE_BIG_TASKS;
	}

redo:
	while (!list_empty(tasks)) {
@@ -8857,6 +8880,10 @@ static int detach_tasks(struct lb_env *env)
		if (env->loop > env->loop_max)
			break;

		/* Abort the loop, if we spent more than 5 msec */
		if (rq_clock(env->src_rq) - start_t > 5000000)
			break;

		/* take a breather every nr_migrate tasks */
		if (env->loop > env->loop_break) {
			env->loop_break += sched_nr_migrate_break;