Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a680bfc7 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: deprecate sched_is_energy_aware() and energy_aware()"

parents d6523f1c 19135a4f
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -175,7 +175,6 @@ bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);
extern bool sched_is_energy_aware(void);

#define SDTL_OVERLAP	0x01

+2 −11
Original line number Diff line number Diff line
@@ -5807,15 +5807,6 @@ static void record_wakee(struct task_struct *p)
	}
}

/*
 * Externally visible function. Let's keep the one above
 * so that the check is inlined/optimized in the sched paths.
 */
bool sched_is_energy_aware(void)
{
	return energy_aware();
}

/*
 * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
 *
@@ -7516,7 +7507,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
	int want_affine = 0;
	int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);

	if (energy_aware()) {
	if (static_branch_unlikely(&sched_energy_present)) {
		rcu_read_lock();
		new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
		rcu_read_unlock();
@@ -10719,7 +10710,7 @@ static inline int find_new_ilb(void)
	rcu_read_unlock();

	if (sd && (ilb >= nr_cpu_ids || !idle_cpu(ilb))) {
		if (!energy_aware() ||
		if (!static_branch_unlikely(&sched_energy_present) ||
				(capacity_orig_of(cpu) ==
				cpu_rq(cpu)->rd->max_cpu_capacity.val ||
				cpu_overutilized(cpu))) {
+2 −2
Original line number Diff line number Diff line
@@ -1533,7 +1533,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
	 * will have to sort it out.
	 */
	may_not_preempt = task_may_not_preempt(curr, cpu);
	if (energy_aware() || may_not_preempt ||
	if (static_branch_unlikely(&sched_energy_present) || may_not_preempt ||
	    (unlikely(rt_task(curr)) &&
	     (curr->nr_cpus_allowed < 2 ||
	      curr->prio <= p->prio))) {
@@ -1872,7 +1872,7 @@ static int find_lowest_rq(struct task_struct *task)
	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	if (energy_aware())
	if (static_branch_unlikely(&sched_energy_present))
		cpu = rt_energy_aware_wake_cpu(task);

	if (cpu == -1)
+0 −13
Original line number Diff line number Diff line
@@ -3255,19 +3255,6 @@ static inline void walt_map_freq_to_load(void) { }
static inline void walt_update_min_max_capacity(void) { }
#endif	/* CONFIG_SCHED_WALT */

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
static inline bool energy_aware(void)
{
	return sysctl_sched_energy_aware;
}
#else
static inline bool energy_aware(void)
{
	return 0;
}
#endif


struct sched_avg_stats {
	int nr;
	int nr_misfit;