Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 354981ab authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched/walt: Fix negative count of sched_asym_cpucapacity static key"

parents be77d9f5 f6c1bf1f
Loading
Loading
Loading
Loading
+5 −16
Original line number Diff line number Diff line
@@ -6937,7 +6937,6 @@ int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
	unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int weight, cpu = smp_processor_id(), best_energy_cpu = prev_cpu;
	struct sched_domain *sd;
	struct perf_domain *pd;
	unsigned long cur_energy;
	cpumask_t *candidates;
@@ -6992,20 +6991,6 @@ int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
	if (!pd)
		goto fail;

	/*
	 * Energy-aware wake-up happens on the lowest sched_domain starting
	 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
	 */
	sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity));
	while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
		sd = sd->parent;
	if (!sd)
		goto fail;

	sync_entity_load_avg(&p->se);
	if (!task_util_est(p))
		goto unlock;

	fbt_env.is_rtg = is_rtg;
	fbt_env.start_cpu = start_cpu;
	fbt_env.order_index = order_index;
@@ -10734,11 +10719,15 @@ static inline int find_new_ilb(void)
{
	int ilb;

	if (sched_energy_enabled())
	if (static_branch_likely(&sched_asym_cpucapacity))
		return find_energy_aware_new_ilb();

	for_each_cpu_and(ilb, nohz.idle_cpus_mask,
			      housekeeping_cpumask(HK_FLAG_MISC)) {
#ifdef CONFIG_SCHED_WALT
		if (cpu_isolated(ilb))
			continue;
#endif
		if (idle_cpu(ilb))
			return ilb;
	}
+9 −17
Original line number Diff line number Diff line
@@ -347,7 +347,14 @@ static bool build_perf_domains(const struct cpumask *cpu_map)
	if (!sysctl_sched_energy_aware)
		goto free;

	/* EAS is enabled for asymmetric CPU capacity topologies. */
	/*
	 * EAS gets disabled when there are no asymmetric capacity
	 * CPUs in the system. For example, all big CPUs are
	 * hotplugged out on a b.L system. We want EAS enabled
	 * all the time to get both power and perf benefits. Apply
	 * this policy when WALT is enabled.
	 */
#ifndef CONFIG_SCHED_WALT
	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
		if (sched_debug()) {
			pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
@@ -355,6 +362,7 @@ static bool build_perf_domains(const struct cpumask *cpu_map)
		}
		goto free;
	}
#endif

	for_each_cpu(i, cpu_map) {
		/* Skip already covered CPUs. */
@@ -641,22 +649,6 @@ static void update_top_cache_domain(int cpu)
	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);

	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY);
	/*
	 * EAS gets disabled when there are no asymmetric capacity
	 * CPUs in the system. For example, all big CPUs are
	 * hotplugged out on a b.L system. We want EAS enabled
	 * all the time to get both power and perf benefits. So,
	 * lets assign sd_asym_cpucapacity to the only available
	 * sched domain. This is also important for a single cluster
	 * systems which wants to use EAS.
	 *
	 * Setting sd_asym_cpucapacity() to a sched domain which
	 * has all symmetric capacity CPUs is technically incorrect but
	 * works well for us in getting EAS enabled all the time.
	 */
	if (!sd)
		sd = cpu_rq(cpu)->sd;

	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
}