Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 04717e47 authored by Pavankumar Kondeti
Browse files

sched: Improve the scheduler



This change is for general scheduler improvement.

Change-Id: I2d3c30440ff69f87dc975b33df9d0531115375ae
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 860a87cc
Loading
Loading
Loading
Loading
+7 −13
Original line number Diff line number Diff line
@@ -23,8 +23,10 @@
 * boost is responsible for disabling it as well.
 */

unsigned int sysctl_sched_boost;
static enum sched_boost_policy boost_policy;
unsigned int sysctl_sched_boost; /* To/from userspace */
unsigned int sched_boost_type; /* currently activated sched boost */
enum sched_boost_policy boost_policy;

static enum sched_boost_policy boost_policy_dt = SCHED_BOOST_NONE;
static DEFINE_MUTEX(boost_mutex);
static int boost_refcount[MAX_NUM_BOOST_TYPE];
@@ -60,11 +62,6 @@ static void set_boost_policy(int type)
	boost_policy = SCHED_BOOST_ON_ALL;
}

enum sched_boost_policy sched_boost_policy(void)
{
	return boost_policy;
}

static bool verify_boost_params(int type)
{
	return type >= RESTRAINED_BOOST_DISABLE && type <= RESTRAINED_BOOST;
@@ -159,8 +156,10 @@ static void _sched_set_boost(int type)
	else
		type = NO_BOOST;

	set_boost_policy(type);
	sched_boost_type = type;
	sysctl_sched_boost = type;

	set_boost_policy(type);
	trace_sched_set_boost(type);
}

@@ -217,8 +216,3 @@ int sched_boost_handler(struct ctl_table *table, int write,
	mutex_unlock(&boost_mutex);
	return ret;
}

/* Expose the sched boost value last written via the sysctl interface. */
int sched_boost(void)
{
	int boost = sysctl_sched_boost;

	return boost;
}
+1 −1
Original line number Diff line number Diff line
@@ -7981,7 +7981,7 @@ static inline int wake_to_idle(struct task_struct *p)
#ifdef CONFIG_SCHED_WALT
static inline bool is_task_util_above_min_thresh(struct task_struct *p)
{
	unsigned int threshold = (sysctl_sched_boost == CONSERVATIVE_BOOST) ?
	unsigned int threshold = (sched_boost() == CONSERVATIVE_BOOST) ?
			sysctl_sched_min_task_util_for_boost :
			sysctl_sched_min_task_util_for_colocation;

+13 −7
Original line number Diff line number Diff line
@@ -2524,11 +2524,6 @@ enum sched_boost_policy {
	SCHED_BOOST_ON_ALL,
};

#define NO_BOOST 0
#define FULL_THROTTLE_BOOST 1
#define CONSERVATIVE_BOOST 2
#define RESTRAINED_BOOST 3

/*
 * Returns the rq capacity of any rq in a group. This does not play
 * well with groups where rq capacity can change independently.
@@ -2831,7 +2826,18 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)

#define	CPU_RESERVED	1

extern int sched_boost(void);
extern enum sched_boost_policy boost_policy;
static inline enum sched_boost_policy sched_boost_policy(void)
{
	return boost_policy;
}

extern unsigned int sched_boost_type;
/* Currently activated sched boost type (see sched_boost_type comment). */
static inline int sched_boost(void)
{
	int type = sched_boost_type;

	return type;
}

extern int preferred_cluster(struct sched_cluster *cluster,
						struct task_struct *p);
extern struct sched_cluster *rq_cluster(struct rq *rq);
@@ -2939,7 +2945,7 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
		 * Filter out tasks less than min task util threshold
		 * under conservative boost.
		 */
		if (sysctl_sched_boost == CONSERVATIVE_BOOST &&
		if (sched_boost() == CONSERVATIVE_BOOST &&
				task_util(p) <=
				sysctl_sched_min_task_util_for_boost)
			policy = SCHED_BOOST_NONE;
+1 −1
Original line number Diff line number Diff line
@@ -3129,7 +3129,7 @@ static void walt_update_coloc_boost_load(void)
	struct sched_cluster *cluster;

	if (!sysctl_sched_little_cluster_coloc_fmin_khz ||
			sysctl_sched_boost == CONSERVATIVE_BOOST)
			sched_boost() == CONSERVATIVE_BOOST)
		return;

	grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);