Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d5bf0140 authored by qctecmdr, committed by Gerrit (the friendly Code Review server)
Browse files

Merge "sched/walt: Improve the scheduler"

parents 37084ff1 82fea0f9
Loading
Loading
Loading
Loading
+9 −2
Original line number Diff line number Diff line
@@ -2489,7 +2489,8 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
	struct rq_flags rf;

#if defined(CONFIG_SMP)
	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
	if ((sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) ||
			walt_want_remote_wakeup()) {
		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
		ttwu_queue_remote(p, cpu, wake_flags);
		return;
@@ -5650,7 +5651,7 @@ bool is_sched_lib_based_app(pid_t pid)
	char *libname, *lib_list;
	struct vm_area_struct *vma;
	char path_buf[LIB_PATH_LENGTH];
	char tmp_lib_name[LIB_PATH_LENGTH];
	char *tmp_lib_name;
	bool found = false;
	struct task_struct *p;
	struct mm_struct *mm;
@@ -5658,11 +5659,16 @@ bool is_sched_lib_based_app(pid_t pid)
	if (strnlen(sched_lib_name, LIB_PATH_LENGTH) == 0)
		return false;

	tmp_lib_name = kmalloc(LIB_PATH_LENGTH, GFP_KERNEL);
	if (!tmp_lib_name)
		return false;

	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		kfree(tmp_lib_name);
		return false;
	}

@@ -5700,6 +5706,7 @@ bool is_sched_lib_based_app(pid_t pid)
	mmput(mm);
put_task_struct:
	put_task_struct(p);
	kfree(tmp_lib_name);
	return found;
}

+3 −8
Original line number Diff line number Diff line
@@ -6499,7 +6499,6 @@ enum fastpaths {
	NONE = 0,
	SYNC_WAKEUP,
	PREV_CPU_FASTPATH,
	MANY_WAKEUP,
};

static void walt_find_best_target(struct sched_domain *sd, cpumask_t *cpus,
@@ -6950,6 +6949,9 @@ int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
	bool boosted = is_uclamp_boosted || (task_boost > 0);
	int start_cpu, order_index, end_index;

	if (walt_is_many_wakeup(sibling_count_hint) && prev_cpu != cpu &&
			cpumask_test_cpu(prev_cpu, &p->cpus_mask))
		return prev_cpu;

	if (unlikely(!cpu_array))
		goto eas_not_ready;
@@ -6979,13 +6981,6 @@ int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
		goto done;
	}

	if (walt_is_many_wakeup(sibling_count_hint) && prev_cpu != cpu &&
				bias_to_this_cpu(p, prev_cpu, start_cpu)) {
		best_energy_cpu = prev_cpu;
		fbt_env.fastpath = MANY_WAKEUP;
		goto done;
	}

	rcu_read_lock();
	pd = rcu_dereference(rd->pd);
	if (!pd)
+11 −2
Original line number Diff line number Diff line
@@ -2859,6 +2859,12 @@ enum sched_boost_policy {

#ifdef CONFIG_SCHED_WALT

#define WALT_MANY_WAKEUP_DEFAULT 1000
/*
 * True when the many-wakeup sysctl has been tuned below its default,
 * i.e. the admin opted in to WALT's remote-wakeup behavior.  Used in
 * ttwu_queue() (see hunk above) to force queueing the wakeup on the
 * remote CPU even when caches are shared.
 */
static inline bool walt_want_remote_wakeup(void)
{
	return sysctl_sched_many_wakeup_threshold < WALT_MANY_WAKEUP_DEFAULT;
}

/* Lowest-numbered CPU in the cluster's cpumask. */
static inline int cluster_first_cpu(struct walt_sched_cluster *cluster)
{
	return cpumask_first(&cluster->cpus);
}
@@ -2996,7 +3002,6 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
#define CPU_RESERVED    1

extern enum sched_boost_policy __weak boost_policy;
extern unsigned int __weak sched_task_filter_util;
/* Current global boost policy (reads the weakly-linked boost_policy). */
static inline enum sched_boost_policy sched_boost_policy(void)
{
	return boost_policy;
}
@@ -3120,7 +3125,7 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
		 * under conservative boost.
		 */
		if (sched_boost() == CONSERVATIVE_BOOST &&
				task_util(p) <= sched_task_filter_util)
			task_util(p) <= sysctl_sched_min_task_util_for_boost)
			policy = SCHED_BOOST_NONE;
	}

@@ -3268,6 +3273,10 @@ static inline bool early_detection_notify(struct rq *rq, u64 wallclock)
}

static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
/* !CONFIG_SCHED_WALT stub: never request a remote-queued wakeup. */
static inline bool walt_want_remote_wakeup(void)
{
	return false;
}
#endif  /* CONFIG_SCHED_WALT */

struct sched_avg_stats {