Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 378dc16e authored by Linux Build Service Account; committed by Gerrit — the friendly Code Review server
Browse files

Merge "ANDROID: sched/fair: Fix incorrect usage of RCU in CPU select path"

parents 6cee5368 d35fc8ec
Loading
Loading
Loading
Loading
+13 −13
Original line number | Diff line number | Diff line
@@ -7469,12 +7469,10 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
		goto out;
	}

	rcu_read_lock();

	sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
	if (!sd) {
		target_cpu = prev_cpu;
		goto unlock;
		goto out;
	}

	sync_entity_load_avg(&p->se);
@@ -7484,14 +7482,14 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
				    &fbt_env);
	if (next_cpu == -1) {
		target_cpu = prev_cpu;
		goto unlock;
		goto out;
	}

	if (fbt_env.placement_boost || fbt_env.need_idle ||
			fbt_env.avoid_prev_cpu || (rtg_target &&
			!cpumask_test_cpu(prev_cpu, rtg_target))) {
		target_cpu = next_cpu;
		goto unlock;
		goto out;
	}

	/* Unconditionally prefer IDLE CPUs for boosted/prefer_idle tasks */
@@ -7499,7 +7497,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
		schedstat_inc(p->se.statistics.nr_wakeups_secb_idle_bt);
		schedstat_inc(this_rq()->eas_stats.secb_idle_bt);
		target_cpu = next_cpu;
		goto unlock;
		goto out;
	}

	target_cpu = prev_cpu;
@@ -7533,7 +7531,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
			schedstat_inc(p->se.statistics.nr_wakeups_secb_insuff_cap);
			schedstat_inc(this_rq()->eas_stats.secb_insuff_cap);
			target_cpu = next_cpu;
			goto unlock;
			goto out;
		}

		/* Check if EAS_CPU_NXT is a more energy efficient CPU */
@@ -7541,20 +7539,18 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
			schedstat_inc(p->se.statistics.nr_wakeups_secb_nrg_sav);
			schedstat_inc(this_rq()->eas_stats.secb_nrg_sav);
			target_cpu = eenv.cpu[eenv.next_idx].cpu_id;
			goto unlock;
			goto out;
		}

		schedstat_inc(p->se.statistics.nr_wakeups_secb_no_nrg_sav);
		schedstat_inc(this_rq()->eas_stats.secb_no_nrg_sav);
		target_cpu = prev_cpu;
		goto unlock;
		goto out;
	}

	schedstat_inc(p->se.statistics.nr_wakeups_secb_count);
	schedstat_inc(this_rq()->eas_stats.secb_count);

unlock:
	rcu_read_unlock();
out:
	trace_sched_task_util(p, next_cpu, backup_cpu, target_cpu, sync,
			      fbt_env.need_idle, fastpath,
@@ -7590,8 +7586,12 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
			cpumask_test_cpu(cpu, tsk_cpus_allowed(p)));
	}

	if (energy_aware())
		return select_energy_cpu_brute(p, prev_cpu, sync);
	if (energy_aware()) {
		rcu_read_lock();
		new_cpu = select_energy_cpu_brute(p, prev_cpu, sync);
		rcu_read_unlock();
		return new_cpu;
	}

	rcu_read_lock();
	for_each_domain(cpu, tmp) {
+4 −7
Original line number | Diff line number | Diff line
@@ -701,14 +701,11 @@ static inline void inter_cluster_migration_fixup
	BUG_ON((s64)src_rq->nt_curr_runnable_sum < 0);
}

static int load_to_index(u32 load)
static u32 load_to_index(u32 load)
{
	if (load < sched_load_granule)
		return 0;
	else if (load >= sched_ravg_window)
		return NUM_LOAD_INDICES - 1;
	else
		return load / sched_load_granule;
	u32 index = load / sched_load_granule;

	return min(index, (u32)(NUM_LOAD_INDICES - 1));
}

static void