
Commit 28f98b64 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge changes I45efd9df,Icf663abb,I2e2c3de2 into msm-4.14

* changes:
  sched/fair: Don't miss out an idle CPU when placement boost is active
  sched/fair: Add support for need_idle task placement
  sched/fair: Pass arguments to find_best_target() in a structure
parents 4b7a5e14 35cb72b8
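
For orientation, the third change is a plain argument-object refactor: find_best_target()'s growing tail of placement hints moves into one struct, and the first two changes add a need_idle hint and make idle-CPU selection win under placement boost. Below is a minimal standalone sketch of that pattern; the forward-declared stub types and the elided scan body are assumptions for illustration, and only the struct layout and the final override mirror the actual diff that follows.

#include <stdbool.h>

struct cpumask;		/* stand-in forward declarations; the real */
struct task_struct;	/* kernel definitions are assumed from the diff */

/* One environment struct instead of a growing argument list. */
struct find_best_target_env {
	struct cpumask *rtg_target;
	bool placement_boost;
	bool need_idle;
};

static int find_best_target(struct task_struct *p, int *backup_cpu,
			    bool boosted, bool prefer_idle,
			    struct find_best_target_env *fbt_env)
{
	int target_cpu = -1, best_idle_cpu = -1;

	/* ... CPU scan elided; it sets target_cpu and best_idle_cpu ... */

	/*
	 * Changes 1 and 2: if the caller demands an idle CPU, or a
	 * placement boost is active, an idle candidate found during
	 * the scan wins over the capacity-based pick.
	 */
	if (fbt_env->need_idle || fbt_env->placement_boost) {
		if (best_idle_cpu != -1)
			target_cpu = best_idle_cpu;
	}

	(void)p; (void)backup_cpu; (void)boosted; (void)prefer_idle;
	return target_cpu;
}

A caller fills the struct once and passes its address, as the updated find_energy_efficient_cpu() does in the diff; adding a new hint then touches the struct and its consumers rather than every prototype and call site.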
+62 −30
@@ -6964,6 +6964,12 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
 	return task_fits_capacity(p, capacity, cpu);
 }
 
+struct find_best_target_env {
+	struct cpumask *rtg_target;
+	bool placement_boost;
+	bool need_idle;
+};
+
 static inline bool skip_sg(struct task_struct *p, struct sched_group *sg,
 			   struct cpumask *rtg_target)
 {
@@ -6991,8 +6997,7 @@ static int start_cpu(bool boosted)
 
 static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 				   bool boosted, bool prefer_idle,
-				   struct cpumask *rtg_target,
-				   enum sched_boost_policy placement_boost)
+				   struct find_best_target_env *fbt_env)
 {
 	unsigned long min_util = boosted_task_util(p);
 	unsigned long target_capacity = ULONG_MAX;
@@ -7025,7 +7030,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 	/* Scan CPUs in all SDs */
 	sg = sd->groups;
 	do {
-		if (skip_sg(p, sg, rtg_target))
+		if (skip_sg(p, sg, fbt_env->rtg_target))
 			continue;
 
 		for_each_cpu_and(i, &p->cpus_allowed, sched_group_span(sg)) {
@@ -7058,19 +7063,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 			new_util = wake_util + task_util(p);
 			spare_cap = capacity_orig_of(i) - wake_util;
 
-			/*
-			 * When placement boost is active, we traverse CPUs
-			 * other than min capacity CPUs. Pick the CPU which
-			 * has the maximum spare capacity.
-			 */
-			if (placement_boost) {
-				if (spare_cap > target_max_spare_cap) {
-					target_cpu = i;
-					target_max_spare_cap = spare_cap;
-				}
-				continue;
-			}
-
 			/*
 			 * Cumulative demand may already be accounting for the
 			 * task. If so, add just the boost-utilization to
@@ -7255,8 +7247,23 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 			target_cpu = i;
 		}
 
+		/*
+		 * When placement boost is active, we traverse CPUs
+		 * other than min capacity CPUs. Reset the target_capacity
+		 * to keep traversing the other clusters.
+		 */
+		if (fbt_env->placement_boost)
+			target_capacity = ULONG_MAX;
+
 	} while (sg = sg->next, sg != sd->groups);
 
+	if (fbt_env->need_idle || fbt_env->placement_boost) {
+		if (best_idle_cpu != -1) {
+			target_cpu = best_idle_cpu;
+			best_idle_cpu = -1;
+		}
+	}
+
 	/*
 	 * For non latency sensitive tasks, cases B and C in the previous loop,
 	 * we pick the best IDLE CPU only if we was not able to find a target
@@ -7457,6 +7464,33 @@ static inline struct energy_env *get_eenv(struct task_struct *p, int prev_cpu)
 	return eenv;
 }
 
+static inline int wake_to_idle(struct task_struct *p)
+{
+	return (current->flags & PF_WAKE_UP_IDLE) ||
+		 (p->flags & PF_WAKE_UP_IDLE);
+}
+
+static inline struct cpumask *find_rtg_target(struct task_struct *p)
+{
+	struct related_thread_group *grp;
+	struct cpumask *rtg_target;
+
+	rcu_read_lock();
+
+	grp = task_related_thread_group(p);
+	if (grp && grp->preferred_cluster) {
+		rtg_target = &grp->preferred_cluster->cpus;
+		if (!task_fits_max(p, cpumask_first(rtg_target)))
+			rtg_target = NULL;
+	} else {
+		rtg_target = NULL;
+	}
+
+	rcu_read_unlock();
+
+	return rtg_target;
+}
+
 /*
  * Needs to be called inside rcu_read_lock critical section.
  * sd is a pointer to the sched domain we wish to use for an
@@ -7471,18 +7505,12 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
 	int cpu_iter, eas_cpu_idx = EAS_CPU_NXT;
 	int energy_cpu = -1;
 	struct energy_env *eenv;
-	struct related_thread_group *grp;
-	struct cpumask *rtg_target = NULL;
-	enum sched_boost_policy placement_boost =
-			task_sched_boost(p) ? sched_boost_policy() :
-			SCHED_BOOST_NONE;
+	struct cpumask *rtg_target = find_rtg_target(p);
+	struct find_best_target_env fbt_env;
+	bool need_idle = wake_to_idle(p);
 
-	grp = task_related_thread_group(p);
-	if (grp && grp->preferred_cluster)
-		rtg_target = &grp->preferred_cluster->cpus;
-
-	if (rtg_target && !task_fits_max(p, cpumask_first(rtg_target)))
-		rtg_target = NULL;
+	if (need_idle)
+		sync = 0;
 
 	if (sysctl_sched_sync_hint_enable && sync &&
 				bias_to_waker_cpu(p, cpu, rtg_target)) {
@@ -7536,11 +7564,14 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
 
 		eenv->max_cpu_count = EAS_CPU_BKP + 1;
 
+		fbt_env.rtg_target = rtg_target;
+		fbt_env.placement_boost = task_placement_boost_enabled(p);
+		fbt_env.need_idle = need_idle;
+
 		/* Find a cpu with sufficient capacity */
 		eenv->cpu[EAS_CPU_NXT].cpu_id = find_best_target(p,
 				&eenv->cpu[EAS_CPU_BKP].cpu_id,
-				boosted, prefer_idle, rtg_target,
-				placement_boost);
+				boosted, prefer_idle, &fbt_env);
 
 		/* take note if no backup was found */
 		if (eenv->cpu[EAS_CPU_BKP].cpu_id < 0)
@@ -7560,7 +7591,8 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
 		return energy_cpu;
 	}
 
-	if (rtg_target != NULL || placement_boost)
+	if (use_fbt && (rtg_target != NULL || fbt_env.placement_boost ||
+			fbt_env.need_idle))
 		return eenv->cpu[EAS_CPU_NXT].cpu_id;
 
 	/* find most energy-efficient CPU */
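
The behavioral subtlety in the hunks above is the moved placement-boost handling: instead of a separate maximum-spare-capacity branch inside the per-CPU loop, the scan now resets target_capacity after each group so higher-capacity clusters stay in contention. A toy two-cluster model of just that reset (not kernel code; the capacities and loop structure are invented for illustration):

#include <stdio.h>
#include <limits.h>
#include <stdbool.h>

/* Toy model: cluster 0 is little (cap 512), cluster 1 is big (cap 1024). */
static const unsigned long cluster_cap[2] = { 512, 1024 };

static int pick_cluster(bool placement_boost)
{
	unsigned long target_capacity = ULONG_MAX;
	int target = -1;

	for (int g = 0; g < 2; g++) {
		/* Normal path: only a smaller-capacity candidate can win. */
		if (cluster_cap[g] < target_capacity) {
			target = g;
			target_capacity = cluster_cap[g];
		}

		/*
		 * With placement boost, reset the watermark so the next
		 * (higher-capacity) cluster is still considered, mirroring
		 * the "keep traversing the other clusters" hunk.
		 */
		if (placement_boost)
			target_capacity = ULONG_MAX;
	}
	return target;
}

int main(void)
{
	printf("no boost -> cluster %d\n", pick_cluster(false));	/* 0 */
	printf("boost    -> cluster %d\n", pick_cluster(true));		/* 1 */
	return 0;
}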
+14 −1
@@ -2894,6 +2894,14 @@ static inline unsigned int power_cost(int cpu, u64 demand)
 
 void note_task_waking(struct task_struct *p, u64 wallclock);
 
+static inline bool task_placement_boost_enabled(struct task_struct *p)
+{
+	if (task_sched_boost(p))
+		return sched_boost_policy() != SCHED_BOOST_NONE;
+
+	return false;
+}
+
 #else	/* CONFIG_SCHED_WALT */
 
 struct walt_sched_stats;
@@ -2902,7 +2910,12 @@ struct sched_cluster;
 
 static inline bool task_sched_boost(struct task_struct *p)
 {
-	return true;
+	return false;
 }
 
+static inline bool task_placement_boost_enabled(struct task_struct *p)
+{
+	return false;
+}
+
 static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
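
The header half of the change follows the kernel's usual config-gated helper pattern: a real task_placement_boost_enabled() under CONFIG_SCHED_WALT and a constant-false stub in the #else branch, so call sites in fair.c compile without #ifdefs; the !WALT task_sched_boost() stub is also corrected to return false. A compilable illustration of that pattern, with sched_boost_policy() simplified away and the task layout invented for the demo:

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_SCHED_WALT 1	/* flip to 0 to mimic WALT compiled out */

struct task_struct { int boost; };	/* invented layout for the demo */

#if CONFIG_SCHED_WALT
static inline bool task_sched_boost(struct task_struct *p)
{
	return p->boost != 0;
}

static inline bool task_placement_boost_enabled(struct task_struct *p)
{
	/* The real helper also checks sched_boost_policy(); elided here. */
	return task_sched_boost(p);
}
#else
/* Constant-false stubs keep callers free of #ifdefs. */
static inline bool task_sched_boost(struct task_struct *p) { return false; }
static inline bool task_placement_boost_enabled(struct task_struct *p) { return false; }
#endif

int main(void)
{
	struct task_struct t = { .boost = 1 };

	/* The call site is identical in both configurations. */
	printf("placement boost enabled: %d\n", task_placement_boost_enabled(&t));
	return 0;
}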