Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a97bdceb authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched/fair: relax preferred cluster requirement"

parents 379d13e3 e821b9e7
Loading
Loading
Loading
Loading
+58 −16
Original line number Diff line number Diff line
@@ -7047,10 +7047,9 @@ static bool is_packing_eligible(struct task_struct *p, int target_cpu,
}

static inline bool skip_sg(struct task_struct *p, struct sched_group *sg,
			   struct cpumask *rtg_target)
			   struct cpumask *rtg_target,
			   unsigned long target_capacity)
{
	int fcpu = group_first_cpu(sg);

	/* Are all CPUs isolated in this group? */
	if (!sg->group_weight)
		return true;
@@ -7062,20 +7061,50 @@ static inline bool skip_sg(struct task_struct *p, struct sched_group *sg,
	if (cpumask_subset(&p->cpus_allowed, sched_group_span(sg)))
		return false;

	if (!task_fits_max(p, fcpu))
		return true;

	if (rtg_target && !cpumask_test_cpu(fcpu, rtg_target))
	/*
	 * if we have found a target cpu within a group, don't bother checking
	 * other groups
	 */
	if (target_capacity != ULONG_MAX)
		return true;

	return false;
}

static int start_cpu(bool boosted)
static int start_cpu(struct task_struct *p, bool boosted,
		     struct cpumask *rtg_target)
{
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int start_cpu = -1;

	if (boosted)
		return rd->max_cap_orig_cpu;

	/* Where the task should land based on its demand */
	if (rd->min_cap_orig_cpu != -1
			&& task_fits_max(p, rd->min_cap_orig_cpu))
		start_cpu = rd->min_cap_orig_cpu;
	else if (rd->mid_cap_orig_cpu != -1
				&& task_fits_max(p, rd->mid_cap_orig_cpu))
		start_cpu = rd->mid_cap_orig_cpu;
	else
		start_cpu = rd->max_cap_orig_cpu;

	return boosted ? rd->max_cap_orig_cpu : rd->min_cap_orig_cpu;
	/*
	 * start it up to its preferred cluster if the preferred cluster is
	 * higher capacity
	 */
	if (start_cpu != -1 && rtg_target &&
			!cpumask_test_cpu(start_cpu, rtg_target)) {
		int rtg_target_cpu = cpumask_first(rtg_target);

		if (capacity_orig_of(start_cpu) <
			capacity_orig_of(rtg_target_cpu)) {
			start_cpu = rtg_target_cpu;
		}
	}

	return start_cpu;
}

static inline int find_best_target(struct task_struct *p, int *backup_cpu,
@@ -7096,13 +7125,14 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
	int best_idle_cpu = -1;
	int target_cpu = -1;
	int cpu, i;
	unsigned long spare_cap;
	long spare_cap, most_spare_cap = 0;
	int most_spare_cap_cpu = -1;
	unsigned int active_cpus_count = 0;

	*backup_cpu = -1;

	/* Find start CPU based on boost value */
	cpu = start_cpu(boosted);
	cpu = start_cpu(p, boosted, fbt_env->rtg_target);
	if (cpu < 0)
		return -1;

@@ -7114,7 +7144,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
	/* Scan CPUs in all SDs */
	sg = sd->groups;
	do {
		if (skip_sg(p, sg, fbt_env->rtg_target))
		if (skip_sg(p, sg, fbt_env->rtg_target, target_capacity))
			continue;

		for_each_cpu_and(i, &p->cpus_allowed, sched_group_span(sg)) {
@@ -7147,6 +7177,11 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
			new_util = wake_util + task_util(p);
			spare_cap = capacity_orig_of(i) - wake_util;

			if (spare_cap > most_spare_cap) {
				most_spare_cap = spare_cap;
				most_spare_cap_cpu = i;
			}

			/*
			 * Cumulative demand may already be accounting for the
			 * task. If so, add just the boost-utilization to
@@ -7340,11 +7375,13 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
		}

		/*
		 * When placement boost is active, we traverse CPUs
		 * other than min capacity CPUs. Reset the target_capacity
		 * to keep traversing the other clusters.
	 * We start with the group where the task should be placed. When
	 * placement boost is active, reset the target_capacity to keep
		 * traversing the other higher clusters. Don't reset it if we
		 * are already at the highest cluster.
		 */
		if (fbt_env->placement_boost)
		if (fbt_env->placement_boost &&
			!is_max_capacity_cpu(group_first_cpu(sg)))
			target_capacity = ULONG_MAX;

	} while (sg = sg->next, sg != sd->groups);
@@ -7384,6 +7421,11 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
		? best_active_cpu
		: best_idle_cpu;

	if (target_cpu == -1 && most_spare_cap_cpu != -1 &&
		/* ensure we use active cpu for active migration */
		!(p->state == TASK_RUNNING && !idle_cpu(most_spare_cap_cpu)))
		target_cpu = most_spare_cap_cpu;

	trace_sched_find_best_target(p, prefer_idle, min_util, cpu,
				     best_idle_cpu, best_active_cpu,
				     target_cpu);
+2 −0
Original line number Diff line number Diff line
@@ -731,6 +731,8 @@ struct root_domain {

	/* First cpu with maximum and minimum original capacity */
	int max_cap_orig_cpu, min_cap_orig_cpu;
	/* First cpu with mid capacity */
	int mid_cap_orig_cpu;
};

extern struct root_domain def_root_domain;
+15 −0
Original line number Diff line number Diff line
@@ -303,6 +303,7 @@ static int init_rootdomain(struct root_domain *rd)
		goto free_cpudl;

	rd->max_cap_orig_cpu = rd->min_cap_orig_cpu = -1;
	rd->mid_cap_orig_cpu = -1;

	return 0;

@@ -1851,6 +1852,20 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att

		cpu_attach_domain(sd, d.rd, i);
	}

	/* set the mid capacity cpu (assumes only 3 capacities) */
	for_each_cpu(i, cpu_map) {
		int max_cpu = READ_ONCE(d.rd->max_cap_orig_cpu);
		int min_cpu = READ_ONCE(d.rd->min_cap_orig_cpu);

		if ((cpu_rq(i)->cpu_capacity_orig
				!=  cpu_rq(min_cpu)->cpu_capacity_orig) &&
			(cpu_rq(i)->cpu_capacity_orig
				!=  cpu_rq(max_cpu)->cpu_capacity_orig)) {
			WRITE_ONCE(d.rd->mid_cap_orig_cpu, i);
			break;
		}
	}
	rcu_read_unlock();

	if (rq && sched_debug_enabled) {