Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2f2d8729 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "power: em: correct increasing freq/power ratio"

parents f5f00f4f 79785b91
Loading
Loading
Loading
Loading
+5 −1
Original line number Diff line number Diff line
@@ -141,7 +141,7 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
		 */
		opp_eff = freq / power;
		if (opp_eff >= prev_opp_eff)
			pr_warn("pd%d: hertz/watts ratio non-monotonically decreasing: em_cap_state %d >= em_cap_state%d\n",
			pr_debug("pd%d: hertz/watts ratio non-monotonically decreasing: em_cap_state %d >= em_cap_state%d\n",
					cpu, i, i - 1);
		prev_opp_eff = opp_eff;
	}
@@ -151,6 +151,10 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
	for (i = 0; i < nr_states; i++) {
		table[i].cost = div64_u64(fmax * table[i].power,
					  table[i].frequency);
		if (i > 0 && (table[i].cost < table[i - 1].cost) &&
				(table[i].power > table[i - 1].power)) {
			table[i].cost = table[i - 1].cost;
		}
	}

	pd->table = table;
+3 −1
Original line number Diff line number Diff line
@@ -120,7 +120,8 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
	const struct sched_dl_entity *dl_se = &p->dl;

	if (later_mask &&
	    cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) {
	    cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr) &&
	    cpumask_and(later_mask, later_mask, cpu_active_mask)) {
		return 1;
	} else {
		int best_cpu = cpudl_maximum(cp);
@@ -128,6 +129,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
		WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

		if (cpumask_test_cpu(best_cpu, p->cpus_ptr) &&
		    cpumask_test_cpu(best_cpu, cpu_active_mask) &&
		    dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
			if (later_mask)
				cpumask_set_cpu(best_cpu, later_mask);
+22 −0
Original line number Diff line number Diff line
@@ -8061,6 +8061,19 @@ group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
	return fits_capacity(sg->sgc->max_capacity, ref->sgc->max_capacity);
}

/*
 * group_similar_cpu_capacity: Returns true if the minimum capacity of the
 * compared groups differ by less than 12.5%.
 */
static inline bool
group_similar_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
{
	long cap_a = sg->sgc->min_capacity;
	long cap_b = ref->sgc->min_capacity;
	/* 12.5% of the larger of the two capacities */
	long threshold = max(cap_a, cap_b) >> 3;

	return abs(cap_a - cap_b) < threshold;
}

static inline enum
group_type group_classify(struct sched_group *group,
			  struct sg_lb_stats *sgs)
@@ -8215,6 +8228,15 @@ static bool update_sd_pick_busiest(struct lb_env *env,
	    group_smaller_min_cpu_capacity(sds->local, sg))
		return false;

	/*
	 * Candidate sg doesn't face any severe imbalance issues so
	 * don't disturb unless the groups are of similar capacity
	 * where balancing is more harmless.
	 */
	if (sgs->group_type == group_other &&
		!group_similar_cpu_capacity(sds->local, sg))
		return false;

	/*
	 * If we have more than one misfit sg go with the biggest misfit.
	 */