Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 504854af authored by qctecmdr Service, committed by Gerrit — the friendly Code Review server
Browse files

Merge "sched: Improve the scheduler"

parents 97ce2bb7 3307b2de
Loading
Loading
Loading
Loading
+6 −6
Original line number Original line Diff line number Diff line
@@ -545,11 +545,11 @@ DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
TRACE_EVENT(sched_load_to_gov,
TRACE_EVENT(sched_load_to_gov,


	TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load,
	TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load,
		u64 freq_aggr_thresh, u64 load, int policy,
		int freq_aggr, u64 load, int policy,
		int big_task_rotation,
		int big_task_rotation,
		unsigned int sysctl_sched_little_cluster_coloc_fmin_khz,
		unsigned int sysctl_sched_little_cluster_coloc_fmin_khz,
		u64 coloc_boost_load),
		u64 coloc_boost_load),
	TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr_thresh, load, policy,
	TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr, load, policy,
		big_task_rotation, sysctl_sched_little_cluster_coloc_fmin_khz,
		big_task_rotation, sysctl_sched_little_cluster_coloc_fmin_khz,
		coloc_boost_load),
		coloc_boost_load),


@@ -558,7 +558,7 @@ TRACE_EVENT(sched_load_to_gov,
		__field(int,    policy)
		__field(int,    policy)
		__field(int,	ed_task_pid)
		__field(int,	ed_task_pid)
		__field(u64,    aggr_grp_load)
		__field(u64,    aggr_grp_load)
		__field(u64,    freq_aggr_thresh)
		__field(int,    freq_aggr)
		__field(u64,    tt_load)
		__field(u64,    tt_load)
		__field(u64,	rq_ps)
		__field(u64,	rq_ps)
		__field(u64,	grp_rq_ps)
		__field(u64,	grp_rq_ps)
@@ -577,7 +577,7 @@ TRACE_EVENT(sched_load_to_gov,
		__entry->policy		= policy;
		__entry->policy		= policy;
		__entry->ed_task_pid	= rq->ed_task ? rq->ed_task->pid : -1;
		__entry->ed_task_pid	= rq->ed_task ? rq->ed_task->pid : -1;
		__entry->aggr_grp_load	= aggr_grp_load;
		__entry->aggr_grp_load	= aggr_grp_load;
		__entry->freq_aggr_thresh = freq_aggr_thresh;
		__entry->freq_aggr	= freq_aggr;
		__entry->tt_load	= tt_load;
		__entry->tt_load	= tt_load;
		__entry->rq_ps		= rq->prev_runnable_sum;
		__entry->rq_ps		= rq->prev_runnable_sum;
		__entry->grp_rq_ps	= rq->grp_time.prev_runnable_sum;
		__entry->grp_rq_ps	= rq->grp_time.prev_runnable_sum;
@@ -592,9 +592,9 @@ TRACE_EVENT(sched_load_to_gov,
		__entry->coloc_boost_load = coloc_boost_load;
		__entry->coloc_boost_load = coloc_boost_load;
	),
	),


	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr_thresh=%llu tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d sysctl_sched_little_cluster_coloc_fmin_khz=%u coloc_boost_load=%llu",
	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d sysctl_sched_little_cluster_coloc_fmin_khz=%u coloc_boost_load=%llu",
		__entry->cpu, __entry->policy, __entry->ed_task_pid,
		__entry->cpu, __entry->policy, __entry->ed_task_pid,
		__entry->aggr_grp_load, __entry->freq_aggr_thresh,
		__entry->aggr_grp_load, __entry->freq_aggr,
		__entry->tt_load, __entry->rq_ps, __entry->grp_rq_ps,
		__entry->tt_load, __entry->rq_ps, __entry->grp_rq_ps,
		__entry->nt_ps, __entry->grp_nt_ps, __entry->pl, __entry->load,
		__entry->nt_ps, __entry->grp_nt_ps, __entry->pl, __entry->load,
		__entry->big_task_rotation,
		__entry->big_task_rotation,
+14 −10
Original line number Original line Diff line number Diff line
@@ -4,6 +4,7 @@
 */
 */


#include "sched.h"
#include "sched.h"
#include "walt.h"
#include <linux/of.h>
#include <linux/of.h>
#include <linux/sched/core_ctl.h>
#include <linux/sched/core_ctl.h>
#include <trace/events/sched.h>
#include <trace/events/sched.h>
@@ -19,7 +20,6 @@ unsigned int sysctl_sched_boost;
static enum sched_boost_policy boost_policy;
static enum sched_boost_policy boost_policy;
static enum sched_boost_policy boost_policy_dt = SCHED_BOOST_NONE;
static enum sched_boost_policy boost_policy_dt = SCHED_BOOST_NONE;
static DEFINE_MUTEX(boost_mutex);
static DEFINE_MUTEX(boost_mutex);
static unsigned int freq_aggr_threshold_backup;
static int boost_refcount[MAX_NUM_BOOST_TYPE];
static int boost_refcount[MAX_NUM_BOOST_TYPE];


/*
/*
@@ -69,6 +69,7 @@ static void _sched_set_boost(int type)
	case NO_BOOST: /* All boost clear */
	case NO_BOOST: /* All boost clear */
		if (boost_refcount[FULL_THROTTLE_BOOST] > 0) {
		if (boost_refcount[FULL_THROTTLE_BOOST] > 0) {
			core_ctl_set_boost(false);
			core_ctl_set_boost(false);
			walt_enable_frequency_aggregation(false);
			boost_refcount[FULL_THROTTLE_BOOST] = 0;
			boost_refcount[FULL_THROTTLE_BOOST] = 0;
		}
		}
		if (boost_refcount[CONSERVATIVE_BOOST] > 0) {
		if (boost_refcount[CONSERVATIVE_BOOST] > 0) {
@@ -76,8 +77,7 @@ static void _sched_set_boost(int type)
			boost_refcount[CONSERVATIVE_BOOST] = 0;
			boost_refcount[CONSERVATIVE_BOOST] = 0;
		}
		}
		if (boost_refcount[RESTRAINED_BOOST] > 0) {
		if (boost_refcount[RESTRAINED_BOOST] > 0) {
			update_freq_aggregate_threshold(
			walt_enable_frequency_aggregation(false);
				freq_aggr_threshold_backup);
			boost_refcount[RESTRAINED_BOOST] = 0;
			boost_refcount[RESTRAINED_BOOST] = 0;
		}
		}
		break;
		break;
@@ -87,6 +87,8 @@ static void _sched_set_boost(int type)
		if (boost_refcount[FULL_THROTTLE_BOOST] == 1) {
		if (boost_refcount[FULL_THROTTLE_BOOST] == 1) {
			core_ctl_set_boost(true);
			core_ctl_set_boost(true);
			restore_cgroup_boost_settings();
			restore_cgroup_boost_settings();
			if (!boost_refcount[RESTRAINED_BOOST])
				walt_enable_frequency_aggregation(true);
		}
		}
		break;
		break;


@@ -99,10 +101,9 @@ static void _sched_set_boost(int type)


	case RESTRAINED_BOOST:
	case RESTRAINED_BOOST:
	    boost_refcount[RESTRAINED_BOOST]++;
	    boost_refcount[RESTRAINED_BOOST]++;
		if (boost_refcount[RESTRAINED_BOOST] == 1) {
		if (boost_refcount[RESTRAINED_BOOST] == 1 &&
			freq_aggr_threshold_backup =
		    !boost_refcount[FULL_THROTTLE_BOOST])
			    update_freq_aggregate_threshold(1);
			walt_enable_frequency_aggregation(true);
		}
		break;
		break;


	case FULL_THROTTLE_BOOST_DISABLE:
	case FULL_THROTTLE_BOOST_DISABLE:
@@ -112,6 +113,9 @@ static void _sched_set_boost(int type)
				core_ctl_set_boost(false);
				core_ctl_set_boost(false);
				if (boost_refcount[CONSERVATIVE_BOOST] >= 1)
				if (boost_refcount[CONSERVATIVE_BOOST] >= 1)
					update_cgroup_boost_settings();
					update_cgroup_boost_settings();
				if (!boost_refcount[RESTRAINED_BOOST])
					walt_enable_frequency_aggregation(
								false);
			}
			}
		}
		}
		break;
		break;
@@ -127,9 +131,9 @@ static void _sched_set_boost(int type)
	case RESTRAINED_BOOST_DISABLE:
	case RESTRAINED_BOOST_DISABLE:
		if (boost_refcount[RESTRAINED_BOOST] >= 1) {
		if (boost_refcount[RESTRAINED_BOOST] >= 1) {
			boost_refcount[RESTRAINED_BOOST]--;
			boost_refcount[RESTRAINED_BOOST]--;
			if (!boost_refcount[RESTRAINED_BOOST])
			if (!boost_refcount[RESTRAINED_BOOST] &&
				update_freq_aggregate_threshold(
			    !boost_refcount[FULL_THROTTLE_BOOST])
					freq_aggr_threshold_backup);
				walt_enable_frequency_aggregation(false);
		}
		}
		break;
		break;


+0 −1
Original line number Original line Diff line number Diff line
@@ -2759,7 +2759,6 @@ extern int update_preferred_cluster(struct related_thread_group *grp,
			struct task_struct *p, u32 old_load);
			struct task_struct *p, u32 old_load);
extern void set_preferred_cluster(struct related_thread_group *grp);
extern void set_preferred_cluster(struct related_thread_group *grp);
extern void add_new_task_to_grp(struct task_struct *new);
extern void add_new_task_to_grp(struct task_struct *new);
extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);


#define NO_BOOST 0
#define NO_BOOST 0
#define FULL_THROTTLE_BOOST 1
#define FULL_THROTTLE_BOOST 1
+3 −27
Original line number Original line Diff line number Diff line
@@ -287,7 +287,7 @@ void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
 *	C1 busy time = 5 + 5 + 6 = 16ms
 *	C1 busy time = 5 + 5 + 6 = 16ms
 *
 *
 */
 */
__read_mostly int sched_freq_aggregate_threshold;
__read_mostly bool sched_freq_aggr_en;


static u64
static u64
update_window_start(struct rq *rq, u64 wallclock, int event)
update_window_start(struct rq *rq, u64 wallclock, int event)
@@ -489,7 +489,6 @@ static u32 top_task_load(struct rq *rq)
u64 freq_policy_load(struct rq *rq)
u64 freq_policy_load(struct rq *rq)
{
{
	unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
	unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
	int freq_aggr_thresh = sched_freq_aggregate_threshold;
	struct sched_cluster *cluster = rq->cluster;
	struct sched_cluster *cluster = rq->cluster;
	u64 aggr_grp_load = cluster->aggr_grp_load;
	u64 aggr_grp_load = cluster->aggr_grp_load;
	u64 load, tt_load = 0;
	u64 load, tt_load = 0;
@@ -500,7 +499,7 @@ u64 freq_policy_load(struct rq *rq)
		goto done;
		goto done;
	}
	}


	if (aggr_grp_load > freq_aggr_thresh)
	if (sched_freq_aggr_en)
		load = rq->prev_runnable_sum + aggr_grp_load;
		load = rq->prev_runnable_sum + aggr_grp_load;
	else
	else
		load = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
		load = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
@@ -523,7 +522,7 @@ u64 freq_policy_load(struct rq *rq)
	}
	}


done:
done:
	trace_sched_load_to_gov(rq, aggr_grp_load, tt_load, freq_aggr_thresh,
	trace_sched_load_to_gov(rq, aggr_grp_load, tt_load, sched_freq_aggr_en,
				load, reporting_policy, walt_rotation_enabled,
				load, reporting_policy, walt_rotation_enabled,
				sysctl_sched_little_cluster_coloc_fmin_khz,
				sysctl_sched_little_cluster_coloc_fmin_khz,
				coloc_boost_load);
				coloc_boost_load);
@@ -2489,15 +2488,10 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
 */
 */
unsigned int __read_mostly sysctl_sched_enable_thread_grouping;
unsigned int __read_mostly sysctl_sched_enable_thread_grouping;


/* Maximum allowed threshold before freq aggregation must be enabled */
#define MAX_FREQ_AGGR_THRESH 1000

struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
static LIST_HEAD(active_related_thread_groups);
static LIST_HEAD(active_related_thread_groups);
DEFINE_RWLOCK(related_thread_group_lock);
DEFINE_RWLOCK(related_thread_group_lock);


unsigned int __read_mostly sysctl_sched_freq_aggregate_threshold_pct;

/*
/*
 * Task groups whose aggregate demand on a cpu is more than
 * Task groups whose aggregate demand on a cpu is more than
 * sched_group_upmigrate need to be up-migrated if possible.
 * sched_group_upmigrate need to be up-migrated if possible.
@@ -2649,23 +2643,6 @@ DEFINE_MUTEX(policy_mutex);
#define pct_to_real(tunable)	\
#define pct_to_real(tunable)	\
		(div64_u64((u64)tunable * (u64)max_task_load(), 100))
		(div64_u64((u64)tunable * (u64)max_task_load(), 100))


/*
 * Set the frequency-aggregation threshold percentage tunable and return
 * the value it held before, so a caller can restore it later.
 *
 * Serialized against other tunable updates via policy_mutex.
 */
unsigned int update_freq_aggregate_threshold(unsigned int threshold)
{
	unsigned int prev;

	mutex_lock(&policy_mutex);

	prev = sysctl_sched_freq_aggregate_threshold_pct;
	sysctl_sched_freq_aggregate_threshold_pct = threshold;
	/* Convert the percentage tunable into an absolute threshold. */
	sched_freq_aggregate_threshold =
		pct_to_real(sysctl_sched_freq_aggregate_threshold_pct);

	mutex_unlock(&policy_mutex);

	return prev;
}

#define ADD_TASK	0
#define ADD_TASK	0
#define REM_TASK	1
#define REM_TASK	1


@@ -2896,7 +2873,6 @@ static int __init create_default_coloc_group(void)
	list_add(&grp->list, &active_related_thread_groups);
	list_add(&grp->list, &active_related_thread_groups);
	write_unlock_irqrestore(&related_thread_group_lock, flags);
	write_unlock_irqrestore(&related_thread_group_lock, flags);


	update_freq_aggregate_threshold(MAX_FREQ_AGGR_THRESH);
	return 0;
	return 0;
}
}
late_initcall(create_default_coloc_group);
late_initcall(create_default_coloc_group);
+6 −1
Original line number Original line Diff line number Diff line
@@ -39,7 +39,6 @@ extern struct mutex cluster_lock;
extern rwlock_t related_thread_group_lock;
extern rwlock_t related_thread_group_lock;
extern __read_mostly unsigned int sched_ravg_hist_size;
extern __read_mostly unsigned int sched_ravg_hist_size;
extern __read_mostly unsigned int sched_freq_aggregate;
extern __read_mostly unsigned int sched_freq_aggregate;
extern __read_mostly int sched_freq_aggregate_threshold;
extern __read_mostly unsigned int sched_window_stats_policy;
extern __read_mostly unsigned int sched_window_stats_policy;
extern __read_mostly unsigned int sched_group_upmigrate;
extern __read_mostly unsigned int sched_group_upmigrate;
extern __read_mostly unsigned int sched_group_downmigrate;
extern __read_mostly unsigned int sched_group_downmigrate;
@@ -298,6 +297,12 @@ extern void walt_rotation_checkpoint(int nr_big);
extern unsigned int walt_rotation_enabled;
extern unsigned int walt_rotation_enabled;
extern unsigned int walt_get_default_coloc_group_load(void);
extern unsigned int walt_get_default_coloc_group_load(void);


extern __read_mostly bool sched_freq_aggr_en;
/*
 * Enable or disable WALT frequency aggregation by setting the global
 * sched_freq_aggr_en flag, which freq_policy_load() consults when
 * deciding whether to add the cluster aggregate group load to the
 * per-rq load reported to the governor.
 */
static inline void walt_enable_frequency_aggregation(bool enable)
{
	sched_freq_aggr_en = enable;
}

#else /* CONFIG_SCHED_WALT */
#else /* CONFIG_SCHED_WALT */


static inline void walt_sched_init_rq(struct rq *rq) { }
static inline void walt_sched_init_rq(struct rq *rq) { }