Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7f6de19f authored by qctecmdr; committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: fix compilation issues for !CONFIG_SCHED_WALT"

parents cc2bf370 71a3c616
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -1253,7 +1253,11 @@ TRACE_EVENT(sched_task_util,
		__entry->is_rtg                 = is_rtg;
		__entry->rtg_skip_min		= rtg_skip_min;
		__entry->start_cpu		= start_cpu;
#ifdef CONFIG_SCHED_WALT
		__entry->unfilter		= p->unfilter;
#else
		__entry->unfilter		= 0;
#endif
	),

	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d candidates=%#lx best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d latency=%llu stune_boosted=%d is_rtg=%d rtg_skip_min=%d start_cpu=%d unfilter=%d",
+35 −26
Original line number Diff line number Diff line
@@ -27,7 +27,6 @@
#include "walt.h"

#ifdef CONFIG_SMP
static inline bool get_rtg_status(struct task_struct *p);
static inline bool task_fits_max(struct task_struct *p, int cpu);
#endif /* CONFIG_SMP */

@@ -6727,14 +6726,47 @@ unsigned long capacity_curr_of(int cpu)
	return cap_scale(max_cap, scale_freq);
}

#ifdef CONFIG_SCHED_WALT
/*
 * get_rtg_status() - read the skip_min flag of @p's related thread group.
 *
 * Returns false when the task belongs to no group. The group pointer is
 * only valid under RCU, so both the lookup and the flag read happen
 * inside an RCU read-side critical section.
 */
static inline bool get_rtg_status(struct task_struct *p)
{
	struct related_thread_group *grp;
	bool skip_min;

	rcu_read_lock();
	grp = task_related_thread_group(p);
	skip_min = grp ? grp->skip_min : false;
	rcu_read_unlock();

	return skip_min;
}

static inline bool task_skip_min_cpu(struct task_struct *p)
{
	return sched_boost() != CONSERVATIVE_BOOST &&
		get_rtg_status(p) && p->unfilter;
}
#else
/* !CONFIG_SCHED_WALT stub: no related thread groups exist, so never skip. */
static inline bool get_rtg_status(struct task_struct *p)
{
	return false;
}

/* !CONFIG_SCHED_WALT stub: p->unfilter does not exist, so never skip min. */
static inline bool task_skip_min_cpu(struct task_struct *p)
{
	return false;
}
#endif

static int get_start_cpu(struct task_struct *p)
{
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int start_cpu = rd->min_cap_orig_cpu;
	bool boosted = schedtune_task_boost(p) > 0 ||
			task_boost_policy(p) == SCHED_BOOST_ON_BIG;
	bool task_skip_min = (sched_boost() != CONSERVATIVE_BOOST)
				&& get_rtg_status(p) && p->unfilter;
	bool task_skip_min = task_skip_min_cpu(p);

	/*
	 * note about min/mid/max_cap_orig_cpu - either all of them will be -ve
@@ -7431,29 +7463,6 @@ static inline int wake_to_idle(struct task_struct *p)
			(p->flags & PF_WAKE_UP_IDLE);
}

#ifdef CONFIG_SCHED_WALT
/*
 * Return the skip_min flag of @p's related thread group, or false if the
 * task has no group. The group lookup is done under rcu_read_lock().
 * NOTE(review): this is a duplicate of the get_rtg_status() defined
 * earlier in this file; the patch shown here removes this copy.
 */
static inline bool get_rtg_status(struct task_struct *p)
{
	struct related_thread_group *grp;
	bool ret = false;

	rcu_read_lock();

	grp = task_related_thread_group(p);
	if (grp)
		ret = grp->skip_min;

	rcu_read_unlock();

	return ret;
}
#else
/* !CONFIG_SCHED_WALT stub: no related thread groups, always false. */
static inline bool get_rtg_status(struct task_struct *p)
{
	return false;
}
#endif

/* return true if cpu should be chosen over best_energy_cpu */
static inline bool select_cpu_same_energy(int cpu, int best_cpu, int prev_cpu)
{
+5 −0
Original line number Diff line number Diff line
@@ -3007,6 +3007,11 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
	return 1;
}

/* Stub: no CPU reservation tracking in this configuration; returns 0. */
static inline int mark_reserved(int cpu)
{
	return 0;
}

/* Stub: no reservation state to clear in this configuration. */
static inline void clear_reserved(int cpu) { }
/* Stub: no related-thread-group state to allocate; returns 0. */
static inline int alloc_related_thread_groups(void) { return 0; }