Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 00aae8d5 authored by Srinath Sridharan's avatar Srinath Sridharan Committed by John Stultz
Browse files

sched/tune: Add support for negative boost values

Change-Id: I164ee04ba98c3a776605f18cb65ee61b3e917939

Contains also:

eas/stune: schedtune cpu boost_max must be non-negative.

This is to avoid under-accounting cpu capacity which may
cause task stacking and frequency spikes.

Change-Id: Ie1c1cbd52a6edb77b4c15a830030aa748dff6f29
parent 6ba071d8
Loading
Loading
Loading
Loading
+10 −10
Original line number Original line Diff line number Diff line
@@ -682,14 +682,14 @@ TRACE_EVENT(sched_tune_config,
 */
 */
TRACE_EVENT(sched_boost_cpu,
TRACE_EVENT(sched_boost_cpu,


	TP_PROTO(int cpu, unsigned long util, unsigned long margin),
	TP_PROTO(int cpu, unsigned long util, long margin),


	TP_ARGS(cpu, util, margin),
	TP_ARGS(cpu, util, margin),


	TP_STRUCT__entry(
	TP_STRUCT__entry(
		__field( int,		cpu			)
		__field( int,		cpu			)
		__field( unsigned long,	util			)
		__field( unsigned long,	util			)
		__field( unsigned long,	margin			)
		__field(long,		margin			)
	),
	),


	TP_fast_assign(
	TP_fast_assign(
@@ -698,7 +698,7 @@ TRACE_EVENT(sched_boost_cpu,
		__entry->margin	= margin;
		__entry->margin	= margin;
	),
	),


	TP_printk("cpu=%d util=%lu margin=%lu",
	TP_printk("cpu=%d util=%lu margin=%ld",
		  __entry->cpu,
		  __entry->cpu,
		  __entry->util,
		  __entry->util,
		  __entry->margin)
		  __entry->margin)
@@ -710,7 +710,7 @@ TRACE_EVENT(sched_boost_cpu,
TRACE_EVENT(sched_tune_tasks_update,
TRACE_EVENT(sched_tune_tasks_update,


	TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx,
	TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx,
		unsigned int boost, unsigned int max_boost),
		int boost, int max_boost),


	TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost),
	TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost),


@@ -720,8 +720,8 @@ TRACE_EVENT(sched_tune_tasks_update,
		__field( int,		cpu		)
		__field( int,		cpu		)
		__field( int,		tasks		)
		__field( int,		tasks		)
		__field( int,		idx		)
		__field( int,		idx		)
		__field( unsigned int,	boost		)
		__field( int,		boost		)
		__field( unsigned int,	max_boost	)
		__field( int,		max_boost	)
	),
	),


	TP_fast_assign(
	TP_fast_assign(
@@ -735,7 +735,7 @@ TRACE_EVENT(sched_tune_tasks_update,
	),
	),


	TP_printk("pid=%d comm=%s "
	TP_printk("pid=%d comm=%s "
			"cpu=%d tasks=%d idx=%d boost=%u max_boost=%u",
			"cpu=%d tasks=%d idx=%d boost=%d max_boost=%d",
		__entry->pid, __entry->comm,
		__entry->pid, __entry->comm,
		__entry->cpu, __entry->tasks, __entry->idx,
		__entry->cpu, __entry->tasks, __entry->idx,
		__entry->boost, __entry->max_boost)
		__entry->boost, __entry->max_boost)
@@ -771,7 +771,7 @@ TRACE_EVENT(sched_tune_boostgroup_update,
 */
 */
TRACE_EVENT(sched_boost_task,
TRACE_EVENT(sched_boost_task,


	TP_PROTO(struct task_struct *tsk, unsigned long util, unsigned long margin),
	TP_PROTO(struct task_struct *tsk, unsigned long util, long margin),


	TP_ARGS(tsk, util, margin),
	TP_ARGS(tsk, util, margin),


@@ -779,7 +779,7 @@ TRACE_EVENT(sched_boost_task,
		__array( char,	comm,	TASK_COMM_LEN		)
		__array( char,	comm,	TASK_COMM_LEN		)
		__field( pid_t,		pid			)
		__field( pid_t,		pid			)
		__field( unsigned long,	util			)
		__field( unsigned long,	util			)
		__field( unsigned long,	margin			)
		__field( long,		margin			)


	),
	),


@@ -790,7 +790,7 @@ TRACE_EVENT(sched_boost_task,
		__entry->margin	= margin;
		__entry->margin	= margin;
	),
	),


	TP_printk("comm=%s pid=%d util=%lu margin=%lu",
	TP_printk("comm=%s pid=%d util=%lu margin=%ld",
		  __entry->comm, __entry->pid,
		  __entry->comm, __entry->pid,
		  __entry->util,
		  __entry->util,
		  __entry->margin)
		  __entry->margin)
+21 −16
Original line number Original line Diff line number Diff line
@@ -5250,22 +5250,25 @@ static bool cpu_overutilized(int cpu)


#ifdef CONFIG_SCHED_TUNE
#ifdef CONFIG_SCHED_TUNE


static unsigned long
static long
schedtune_margin(unsigned long signal, unsigned long boost)
schedtune_margin(unsigned long signal, long boost)
{
{
	unsigned long long margin = 0;
	long long margin = 0;


	/*
	/*
	 * Signal proportional compensation (SPC)
	 * Signal proportional compensation (SPC)
	 *
	 *
	 * The Boost (B) value is used to compute a Margin (M) which is
	 * The Boost (B) value is used to compute a Margin (M) which is
	 * proportional to the complement of the original Signal (S):
	 * proportional to the complement of the original Signal (S):
	 *   M = B * (SCHED_LOAD_SCALE - S)
	 *   M = B * (SCHED_LOAD_SCALE - S), if B is positive
	 *   M = B * S, if B is negative
	 * The obtained M could be used by the caller to "boost" S.
	 * The obtained M could be used by the caller to "boost" S.
	 */
	 */
	if (boost >= 0) {
		margin  = SCHED_LOAD_SCALE - signal;
		margin  = SCHED_LOAD_SCALE - signal;
		margin *= boost;
		margin *= boost;

	} else
		margin = -signal * boost;
	/*
	/*
	 * Fast integer division by constant:
	 * Fast integer division by constant:
	 *  Constant   :                 (C) = 100
	 *  Constant   :                 (C) = 100
@@ -5281,13 +5284,15 @@ schedtune_margin(unsigned long signal, unsigned long boost)
	margin  *= 1311;
	margin  *= 1311;
	margin >>= 17;
	margin >>= 17;


	if (boost < 0)
		margin *= -1;
	return margin;
	return margin;
}
}


static inline unsigned int
static inline int
schedtune_cpu_margin(unsigned long util, int cpu)
schedtune_cpu_margin(unsigned long util, int cpu)
{
{
	unsigned int boost;
	int boost;


#ifdef CONFIG_CGROUP_SCHEDTUNE
#ifdef CONFIG_CGROUP_SCHEDTUNE
	boost = schedtune_cpu_boost(cpu);
	boost = schedtune_cpu_boost(cpu);
@@ -5300,12 +5305,12 @@ schedtune_cpu_margin(unsigned long util, int cpu)
	return schedtune_margin(util, boost);
	return schedtune_margin(util, boost);
}
}


static inline unsigned long
static inline long
schedtune_task_margin(struct task_struct *task)
schedtune_task_margin(struct task_struct *task)
{
{
	unsigned int boost;
	int boost;
	unsigned long util;
	unsigned long util;
	unsigned long margin;
	long margin;


#ifdef CONFIG_CGROUP_SCHEDTUNE
#ifdef CONFIG_CGROUP_SCHEDTUNE
	boost = schedtune_task_boost(task);
	boost = schedtune_task_boost(task);
@@ -5323,13 +5328,13 @@ schedtune_task_margin(struct task_struct *task)


#else /* CONFIG_SCHED_TUNE */
#else /* CONFIG_SCHED_TUNE */


static inline unsigned int
static inline int
schedtune_cpu_margin(unsigned long util, int cpu)
schedtune_cpu_margin(unsigned long util, int cpu)
{
{
	return 0;
	return 0;
}
}


static inline unsigned int
static inline int
schedtune_task_margin(struct task_struct *task)
schedtune_task_margin(struct task_struct *task)
{
{
	return 0;
	return 0;
@@ -5341,7 +5346,7 @@ static inline unsigned long
boosted_cpu_util(int cpu)
boosted_cpu_util(int cpu)
{
{
	unsigned long util = cpu_util(cpu);
	unsigned long util = cpu_util(cpu);
	unsigned long margin = schedtune_cpu_margin(util, cpu);
	long margin = schedtune_cpu_margin(util, cpu);


	trace_sched_boost_cpu(cpu, util, margin);
	trace_sched_boost_cpu(cpu, util, margin);


@@ -5352,7 +5357,7 @@ static inline unsigned long
boosted_task_util(struct task_struct *task)
boosted_task_util(struct task_struct *task)
{
{
	unsigned long util = task_util(task);
	unsigned long util = task_util(task);
	unsigned long margin = schedtune_task_margin(task);
	long margin = schedtune_task_margin(task);


	trace_sched_boost_task(task, util, margin);
	trace_sched_boost_task(task, util, margin);


+16 −9
Original line number Original line Diff line number Diff line
@@ -213,10 +213,11 @@ static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
 */
 */
struct boost_groups {
struct boost_groups {
	/* Maximum boost value for all RUNNABLE tasks on a CPU */
	/* Maximum boost value for all RUNNABLE tasks on a CPU */
	unsigned boost_max;
	bool idle;
	int boost_max;
	struct {
	struct {
		/* The boost for tasks on that boost group */
		/* The boost for tasks on that boost group */
		unsigned boost;
		int boost;
		/* Count of RUNNABLE tasks on that boost group */
		/* Count of RUNNABLE tasks on that boost group */
		unsigned tasks;
		unsigned tasks;
	} group[BOOSTGROUPS_COUNT];
	} group[BOOSTGROUPS_COUNT];
@@ -229,7 +230,7 @@ static void
schedtune_cpu_update(int cpu)
schedtune_cpu_update(int cpu)
{
{
	struct boost_groups *bg;
	struct boost_groups *bg;
	unsigned boost_max;
	int boost_max;
	int idx;
	int idx;


	bg = &per_cpu(cpu_boost_groups, cpu);
	bg = &per_cpu(cpu_boost_groups, cpu);
@@ -243,9 +244,13 @@ schedtune_cpu_update(int cpu)
		 */
		 */
		if (bg->group[idx].tasks == 0)
		if (bg->group[idx].tasks == 0)
			continue;
			continue;

		boost_max = max(boost_max, bg->group[idx].boost);
		boost_max = max(boost_max, bg->group[idx].boost);
	}
	}

	/* Ensures boost_max is non-negative when all cgroup boost values
	 * are negative. Avoids under-accounting of cpu capacity which may cause
	 * task stacking and frequency spikes. */
	boost_max = max(boost_max, 0);
	bg->boost_max = boost_max;
	bg->boost_max = boost_max;
}
}


@@ -391,7 +396,7 @@ int schedtune_task_boost(struct task_struct *p)
	return task_boost;
	return task_boost;
}
}


static u64
static s64
boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
{
	struct schedtune *st = css_st(css);
	struct schedtune *st = css_st(css);
@@ -401,11 +406,13 @@ boost_read(struct cgroup_subsys_state *css, struct cftype *cft)


static int
static int
boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
	    u64 boost)
	    s64 boost)
{
{
	struct schedtune *st = css_st(css);
	struct schedtune *st = css_st(css);
	unsigned threshold_idx;
	int boost_pct;


	if (boost < 0 || boost > 100)
	if (boost < -100 || boost > 100)
		return -EINVAL;
		return -EINVAL;


	st->boost = boost;
	st->boost = boost;
@@ -423,8 +430,8 @@ boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
static struct cftype files[] = {
static struct cftype files[] = {
	{
	{
		.name = "boost",
		.name = "boost",
		.read_u64 = boost_read,
		.read_s64 = boost_read,
		.write_u64 = boost_write,
		.write_s64 = boost_write,
	},
	},
	{ }	/* terminate */
	{ }	/* terminate */
};
};