
Commit e193cefc authored by Pavankumar Kondeti

sched/walt: Improve the scheduler



This change is for general scheduler improvement.

Change-Id: I310bbdc19bb65a0c562ec6a208f2da713eba954d
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 2e2260c3
+1 −0
@@ -35,6 +35,7 @@ extern unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
 extern unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
 #ifdef CONFIG_SCHED_WALT
 extern unsigned int sysctl_sched_user_hint;
+extern const int sched_user_hint_max;
 extern unsigned int sysctl_sched_cpu_high_irqload;
 extern unsigned int sysctl_sched_boost;
 extern unsigned int sysctl_sched_group_upmigrate_pct;
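
The added extern pairs a constant cap with the existing sysctl_sched_user_hint tunable; this hunk only declares it. A minimal sketch of what the definitions elsewhere in the tree plausibly look like (the cap value of 1000 is an assumption, not taken from this diff):

	/* Sketch only: definitions live outside this diff; the 1000 cap is assumed. */
	unsigned int sysctl_sched_user_hint;	/* 0 means no user boost */
	const int sched_user_hint_max = 1000;	/* upper bound; "suh max" in later hunks */

Keeping the cap const next to the sysctl lets the proc handler clamp writes without exposing the limit as a second tunable.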
+2 −2
@@ -2008,7 +2008,7 @@ static inline void walt_try_to_wake_up(struct task_struct *p)

 	rcu_read_lock();
 	grp = task_related_thread_group(p);
-	if (update_preferred_cluster(grp, p, old_load))
+	if (update_preferred_cluster(grp, p, old_load, false))
 		set_preferred_cluster(grp);
 	rcu_read_unlock();
 }
@@ -3203,7 +3203,7 @@ void scheduler_tick(void)

 	rcu_read_lock();
 	grp = task_related_thread_group(curr);
-	if (update_preferred_cluster(grp, curr, old_load))
+	if (update_preferred_cluster(grp, curr, old_load, true))
 		set_preferred_cluster(grp);
 	rcu_read_unlock();

+2 −1
@@ -3912,7 +3912,8 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
 	if (is_min_capacity_cpu(cpu)) {
 		if (task_boost_policy(p) == SCHED_BOOST_ON_BIG ||
 			task_boost > 0 ||
-			schedtune_task_boost(p) > 0)
+			schedtune_task_boost(p) > 0 ||
+			walt_should_kick_upmigrate(p, cpu))
 			return false;
 	} else { /* mid cap cpu */
 		if (task_boost > 1)
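
walt_should_kick_upmigrate() is not defined in any hunk shown here. A hedged sketch of what such a helper could look like, assuming it kicks colocated top-app tasks off min-capacity CPUs while the user hint is pinned at its max; rtg->id, rtg->skip_min, and the use of DEFAULT_CGROUP_COLOC_ID are assumptions (though the walt.c hunk further down drops its file-local DEFAULT_CGROUP_COLOC_ID define, consistent with that macro moving somewhere fair.c can see it):

	/* Assumed sketch: the caller above only tests this on min-capacity CPUs,
	 * so returning true means "this task no longer fits here, migrate up". */
	static inline bool walt_should_kick_upmigrate(struct task_struct *p, int cpu)
	{
		struct related_thread_group *rtg = p->grp;

		if (!is_min_capacity_cpu(cpu))
			return false;

		return is_suh_max() && rtg &&
		       rtg->id == DEFAULT_CGROUP_COLOC_ID && rtg->skip_min;
	}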
+2 −2
@@ -2680,7 +2680,7 @@ extern unsigned int __read_mostly sched_load_granule;

 extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
 extern int update_preferred_cluster(struct related_thread_group *grp,
-			struct task_struct *p, u32 old_load);
+			struct task_struct *p, u32 old_load, bool from_tick);
 extern void set_preferred_cluster(struct related_thread_group *grp);
 extern void add_new_task_to_grp(struct task_struct *new);

@@ -2995,7 +2995,7 @@ static inline u32 task_load(struct task_struct *p) { return 0; }
 static inline u32 task_pl(struct task_struct *p) { return 0; }
 
 static inline int update_preferred_cluster(struct related_thread_group *grp,
-			 struct task_struct *p, u32 old_load)
+			 struct task_struct *p, u32 old_load, bool from_tick)
 {
 	return 0;
 }
+14 −6
@@ -522,8 +522,13 @@ static inline u64 freq_policy_load(struct rq *rq)
 		break;
 	}
 
-	if (should_apply_suh_freq_boost(cluster))
-		load = div64_u64(load * sysctl_sched_user_hint, (u64)100);
+	if (should_apply_suh_freq_boost(cluster)) {
+		if (is_suh_max())
+			load = sched_ravg_window;
+		else
+			load = div64_u64(load * sysctl_sched_user_hint,
+					 (u64)100);
+	}
 
 done:
 	trace_sched_load_to_gov(rq, aggr_grp_load, tt_load, sched_freq_aggr_en,
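
A worked example of the boost arithmetic above, with illustrative numbers (the 20 ms window is an assumption; sched_ravg_window is configurable):

	/* Assume sched_ravg_window = 20000000 ns and load = 8000000 ns of work:
	 *   sysctl_sched_user_hint = 120 -> load = 8000000 * 120 / 100 = 9600000 (+20%)
	 *   hint at sched_user_hint_max  -> load = 20000000, i.e. a fully busy
	 *   window, pushing the governor toward the cluster's maximum frequency.
	 */

div64_u64() is used rather than plain division because 64-by-64 division on 32-bit targets needs the kernel's helper instead of a libgcc routine.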
@@ -2623,6 +2628,9 @@ void update_best_cluster(struct related_thread_group *grp,
 		return;
 	}
 
+	if (is_suh_max())
+		demand = sched_group_upmigrate;
+
 	if (!grp->skip_min) {
 		if (demand >= sched_group_upmigrate) {
 			grp->skip_min = true;
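
Both this hunk and the frequency-boost hunk above gate on is_suh_max(), which the diff never shows. Under the natural reading of the declarations in the first hunk it is just an equality test against the cap; a sketch, offered as an assumption:

	/* Assumed: "suh" = sched user hint; max means the tunable sits at its cap. */
	static inline bool is_suh_max(void)
	{
		return sysctl_sched_user_hint == sched_user_hint_max;
	}

Forcing demand to sched_group_upmigrate makes the demand >= sched_group_upmigrate test above always pass, so grp->skip_min flips to true and the group's preferred cluster leaves the minimum-capacity CPUs.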
@@ -2719,13 +2727,16 @@ void set_preferred_cluster(struct related_thread_group *grp)
 }
 
 int update_preferred_cluster(struct related_thread_group *grp,
-		struct task_struct *p, u32 old_load)
+		struct task_struct *p, u32 old_load, bool from_tick)
 {
 	u32 new_load = task_load(p);
 
 	if (!grp)
 		return 0;
 
+	if (unlikely(from_tick && is_suh_max()))
+		return 1;
+
 	/*
 	 * Update if task's load has changed significantly or a complete window
 	 * has passed since we last updated preference
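
The early return above is what the new from_tick argument exists for: while the hint sits at its max, every scheduler tick reports "preference changed" instead of waiting for a significant load change or a full window. A condensed view of the resulting tick-path behavior, assembled from the hunks above (sketch, not new code from this commit):

	/* Tick path once the user hint is maxed: */
	grp = task_related_thread_group(curr);
	if (update_preferred_cluster(grp, curr, old_load, true))	/* always 1 here */
		set_preferred_cluster(grp);	/* re-evaluates placement every tick */

unlikely() is a branch-prediction hint: the maxed-hint case is expected to be rare, so the common path stays fall-through.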
@@ -2740,8 +2751,6 @@ int update_preferred_cluster(struct related_thread_group *grp,
 #define ADD_TASK	0
 #define REM_TASK	1
 
-#define DEFAULT_CGROUP_COLOC_ID 1
-
 static inline struct related_thread_group*
 lookup_related_thread_group(unsigned int group_id)
 {
@@ -2995,7 +3004,6 @@ static bool is_cluster_hosting_top_app(struct sched_cluster *cluster)
 	return (is_min_capacity_cluster(cluster) == grp_on_min);
 }
 
-
 static unsigned long max_cap[NR_CPUS];
 static unsigned long thermal_cap_cpu[NR_CPUS];
