Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1a1b2911 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched/cpufreq_schedutil: create a function for common steps"

parents cac66d1e 86916de3
Loading
Loading
Loading
Loading
+0 −8
Original line number Diff line number Diff line
@@ -40,7 +40,6 @@ extern unsigned int sysctl_sched_group_downmigrate_pct;
extern unsigned int sysctl_sched_walt_rotate_big_tasks;
extern unsigned int sysctl_sched_min_task_util_for_boost;
extern unsigned int sysctl_sched_min_task_util_for_colocation;
extern unsigned int sysctl_sched_little_cluster_coloc_fmin_khz;
extern unsigned int sysctl_sched_asym_cap_sibling_freq_match_pct;

extern int
@@ -120,13 +119,6 @@ extern int sysctl_numa_balancing(struct ctl_table *table, int write,
extern int sysctl_schedstats(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos);

#ifdef CONFIG_SCHED_WALT
extern int sched_little_cluster_coloc_fmin_khz_handler(struct ctl_table *table,
					int write, void __user *buffer,
					size_t *lenp, loff_t *ppos);
#endif

#define LIB_PATH_LENGTH 512
extern char sched_lib_name[LIB_PATH_LENGTH];
extern unsigned int sched_lib_mask_force;
+6 −4
Original line number Diff line number Diff line
@@ -576,8 +576,8 @@ TRACE_EVENT(sugov_util_update,
	    TP_PROTO(int cpu,
		     unsigned long util, unsigned long avg_cap,
		     unsigned long max_cap, unsigned long nl, unsigned long pl,
		     unsigned int flags),
	    TP_ARGS(cpu, util, avg_cap, max_cap, nl, pl, flags),
		     unsigned int rtgb, unsigned int flags),
	    TP_ARGS(cpu, util, avg_cap, max_cap, nl, pl, rtgb, flags),
	    TP_STRUCT__entry(
		    __field(int, cpu)
		    __field(unsigned long, util)
@@ -585,6 +585,7 @@ TRACE_EVENT(sugov_util_update,
		    __field(unsigned long, max_cap)
		    __field(unsigned long, nl)
		    __field(unsigned long, pl)
		    __field(unsigned int, rtgb)
		    __field(unsigned int, flags)
	    ),
	    TP_fast_assign(
@@ -594,12 +595,13 @@ TRACE_EVENT(sugov_util_update,
		    __entry->max_cap = max_cap;
		    __entry->nl = nl;
		    __entry->pl = pl;
		    __entry->rtgb = rtgb;
		    __entry->flags = flags;
	    ),
	    TP_printk("cpu=%d util=%lu avg_cap=%lu max_cap=%lu nl=%lu pl=%lu flags=0x%x",
	    TP_printk("cpu=%d util=%lu avg_cap=%lu max_cap=%lu nl=%lu pl=%lu rtgb=%u flags=0x%x",
		      __entry->cpu, __entry->util, __entry->avg_cap,
		      __entry->max_cap, __entry->nl,
		      __entry->pl, __entry->flags)
		      __entry->pl, __entry->rtgb, __entry->flags)
);

TRACE_EVENT(sugov_next_freq,
+4 −15
Original line number Diff line number Diff line
@@ -495,12 +495,9 @@ TRACE_EVENT(sched_load_to_gov,

	TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load,
		int freq_aggr, u64 load, int policy,
		int big_task_rotation,
		unsigned int sysctl_sched_little_cluster_coloc_fmin_khz,
		u64 coloc_boost_load),
		int big_task_rotation),
	TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr, load, policy,
		big_task_rotation, sysctl_sched_little_cluster_coloc_fmin_khz,
		coloc_boost_load),
		big_task_rotation),

	TP_STRUCT__entry(
		__field(int,	cpu)
@@ -516,9 +513,6 @@ TRACE_EVENT(sched_load_to_gov,
		__field(u64,	pl)
		__field(u64,    load)
		__field(int,    big_task_rotation)
		__field(unsigned int,
				sysctl_sched_little_cluster_coloc_fmin_khz)
		__field(u64,	coloc_boost_load)
	),

	TP_fast_assign(
@@ -536,18 +530,13 @@ TRACE_EVENT(sched_load_to_gov,
					rq->walt_stats.pred_demands_sum_scaled;
		__entry->load		= load;
		__entry->big_task_rotation = big_task_rotation;
		__entry->sysctl_sched_little_cluster_coloc_fmin_khz =
				sysctl_sched_little_cluster_coloc_fmin_khz;
		__entry->coloc_boost_load = coloc_boost_load;
	),

	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d sysctl_sched_little_cluster_coloc_fmin_khz=%u coloc_boost_load=%llu",
	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d",
		__entry->cpu, __entry->policy, __entry->ed_task_pid,
		__entry->aggr_grp_load, __entry->freq_aggr,
		__entry->tt_load, __entry->rq_ps, __entry->grp_rq_ps,
		__entry->nt_ps, __entry->grp_nt_ps, __entry->pl, __entry->load,
		__entry->big_task_rotation,
		__entry->sysctl_sched_little_cluster_coloc_fmin_khz,
		__entry->coloc_boost_load)
		__entry->big_task_rotation)
);
#endif
+89 −10
Original line number Diff line number Diff line
@@ -23,6 +23,7 @@ struct sugov_tunables {
	unsigned int		down_rate_limit_us;
	unsigned int		hispeed_load;
	unsigned int		hispeed_freq;
	unsigned int		rtg_boost_freq;
	bool			pl;
};

@@ -36,6 +37,7 @@ struct sugov_policy {
	struct sugov_tunables	*tunables;
	struct list_head	tunables_hook;
	unsigned long hispeed_util;
	unsigned long rtg_boost_util;
	unsigned long max;

	raw_spinlock_t		update_lock;	/* For shared policies */
@@ -551,11 +553,15 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }

#define NL_RATIO 75
#define DEFAULT_HISPEED_LOAD 90
#define DEFAULT_CPU0_RTG_BOOST_FREQ 1000000
#define DEFAULT_CPU4_RTG_BOOST_FREQ 0
#define DEFAULT_CPU7_RTG_BOOST_FREQ 0
static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
			      unsigned long *max)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	bool is_migration = sg_cpu->flags & SCHED_CPUFREQ_INTERCLUSTER_MIG;
	bool is_rtg_boost = sg_cpu->walt_load.rtgb_active;
	unsigned long nl = sg_cpu->walt_load.nl;
	unsigned long cpu_util = sg_cpu->util;
	bool is_hiload;
@@ -563,6 +569,9 @@ static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
	if (use_pelt())
		return;

	if (is_rtg_boost)
		*util = max(*util, sg_policy->rtg_boost_util);

	is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap,
					   sg_policy->tunables->hispeed_load,
					   100));
@@ -587,12 +596,22 @@ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_p
		sg_policy->need_freq_update = true;
}

/*
 * target_util - translate a frequency into a utilization target.
 * @sg_policy: schedutil policy instance used for the conversion
 * @freq: frequency value to translate (same units as the tunables,
 *        e.g. hispeed_freq / rtg_boost_freq)
 *
 * Converts @freq to utilization via freq_to_util() and scales the
 * result by TARGET_LOAD percent, giving the utilization level at
 * which the governor should be running at @freq.
 */
static inline unsigned long target_util(struct sugov_policy *sg_policy,
				  unsigned int freq)
{
	return mult_frac(freq_to_util(sg_policy, freq), TARGET_LOAD, 100);
}

static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max, hs_util;
	unsigned long util, max, hs_util, boost_util;
	unsigned int next_f;
	bool busy;

@@ -615,10 +634,13 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,

	if (sg_policy->max != max) {
		sg_policy->max = max;
		hs_util = freq_to_util(sg_policy,
		hs_util = target_util(sg_policy,
				       sg_policy->tunables->hispeed_freq);
		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
		sg_policy->hispeed_util = hs_util;

		boost_util = target_util(sg_policy,
				    sg_policy->tunables->rtg_boost_freq);
		sg_policy->rtg_boost_util = boost_util;
	}

	util = sugov_iowait_apply(sg_cpu, time, util, max);
@@ -627,7 +649,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,

	trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util,
				sg_policy->avg_cap, max, sg_cpu->walt_load.nl,
				sg_cpu->walt_load.pl, flags);
				sg_cpu->walt_load.pl,
				sg_cpu->walt_load.rtgb_active, flags);

	sugov_walt_adjust(sg_cpu, &util, &max);
	next_f = get_next_freq(sg_policy, util, max);
@@ -710,7 +733,7 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long hs_util;
	unsigned long hs_util, boost_util;
	unsigned int next_f;

	if (!sg_policy->tunables->pl && flags & SCHED_CPUFREQ_PL)
@@ -722,10 +745,13 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)

	if (sg_policy->max != sg_cpu->max) {
		sg_policy->max = sg_cpu->max;
		hs_util = freq_to_util(sg_policy,
		hs_util = target_util(sg_policy,
					sg_policy->tunables->hispeed_freq);
		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
		sg_policy->hispeed_util = hs_util;

		boost_util = target_util(sg_policy,
				    sg_policy->tunables->rtg_boost_freq);
		sg_policy->rtg_boost_util = boost_util;
	}

	sugov_iowait_boost(sg_cpu, time, flags);
@@ -737,7 +763,8 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)

	trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util, sg_policy->avg_cap,
				sg_cpu->max, sg_cpu->walt_load.nl,
				sg_cpu->walt_load.pl, flags);
				sg_cpu->walt_load.pl,
				sg_cpu->walt_load.rtgb_active, flags);

	if (sugov_should_update_freq(sg_policy, time) &&
	    !(flags & SCHED_CPUFREQ_CONTINUE)) {
@@ -909,9 +936,8 @@ static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set,
	tunables->hispeed_freq = val;
	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
		hs_util = freq_to_util(sg_policy,
		hs_util = target_util(sg_policy,
					sg_policy->tunables->hispeed_freq);
		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
		sg_policy->hispeed_util = hs_util;
		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
	}
@@ -919,6 +945,37 @@ static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set,
	return count;
}

static ssize_t rtg_boost_freq_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->rtg_boost_freq);
}

/*
 * sysfs "store" handler for rtg_boost_freq.
 *
 * Parses @buf as an unsigned decimal value, stores it in the shared
 * tunables, then walks every policy attached to @attr_set and, under
 * that policy's update_lock, refreshes the cached rtg_boost_util
 * derived from the new frequency via target_util().
 *
 * Returns @count on success, -EINVAL if @buf is not a valid unsigned
 * decimal number.
 */
static ssize_t rtg_boost_freq_store(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned long lock_flags;
	unsigned int freq;

	if (kstrtouint(buf, 10, &freq))
		return -EINVAL;

	tunables->rtg_boost_freq = freq;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
		raw_spin_lock_irqsave(&sg_policy->update_lock, lock_flags);
		sg_policy->rtg_boost_util = target_util(sg_policy,
					sg_policy->tunables->rtg_boost_freq);
		raw_spin_unlock_irqrestore(&sg_policy->update_lock, lock_flags);
	}

	return count;
}

static ssize_t pl_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
@@ -939,6 +996,7 @@ static ssize_t pl_store(struct gov_attr_set *attr_set, const char *buf,

static struct governor_attr hispeed_load = __ATTR_RW(hispeed_load);
static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
static struct governor_attr rtg_boost_freq = __ATTR_RW(rtg_boost_freq);
static struct governor_attr pl = __ATTR_RW(pl);

static struct attribute *sugov_attributes[] = {
@@ -946,6 +1004,7 @@ static struct attribute *sugov_attributes[] = {
	&down_rate_limit_us.attr,
	&hispeed_load.attr,
	&hispeed_freq.attr,
	&rtg_boost_freq.attr,
	&pl.attr,
	NULL
};
@@ -1059,6 +1118,7 @@ static void sugov_tunables_save(struct cpufreq_policy *policy,

	cached->pl = tunables->pl;
	cached->hispeed_load = tunables->hispeed_load;
	cached->rtg_boost_freq = tunables->rtg_boost_freq;
	cached->hispeed_freq = tunables->hispeed_freq;
	cached->up_rate_limit_us = tunables->up_rate_limit_us;
	cached->down_rate_limit_us = tunables->down_rate_limit_us;
@@ -1083,6 +1143,7 @@ static void sugov_tunables_restore(struct cpufreq_policy *policy)

	tunables->pl = cached->pl;
	tunables->hispeed_load = cached->hispeed_load;
	tunables->rtg_boost_freq = cached->rtg_boost_freq;
	tunables->hispeed_freq = cached->hispeed_freq;
	tunables->up_rate_limit_us = cached->up_rate_limit_us;
	tunables->down_rate_limit_us = cached->down_rate_limit_us;
@@ -1092,6 +1153,7 @@ static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	unsigned long util;
	int ret = 0;

	/* State should be equivalent to EXIT */
@@ -1135,8 +1197,25 @@ static int sugov_init(struct cpufreq_policy *policy)
	tunables->hispeed_load = DEFAULT_HISPEED_LOAD;
	tunables->hispeed_freq = 0;

	switch (policy->cpu) {
	default:
	case 0:
		tunables->rtg_boost_freq = DEFAULT_CPU0_RTG_BOOST_FREQ;
		break;
	case 4:
		tunables->rtg_boost_freq = DEFAULT_CPU4_RTG_BOOST_FREQ;
		break;
	case 7:
		tunables->rtg_boost_freq = DEFAULT_CPU7_RTG_BOOST_FREQ;
		break;
	}

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	util = target_util(sg_policy, sg_policy->tunables->rtg_boost_freq);
	sg_policy->rtg_boost_util = util;

	stale_ns = sched_ravg_window + (sched_ravg_window >> 3);

	sugov_tunables_restore(policy);
+3 −3
Original line number Diff line number Diff line
@@ -143,7 +143,6 @@ struct sched_cluster {
	unsigned int max_possible_freq;
	bool freq_init_done;
	u64 aggr_grp_load;
	u64 coloc_boost_load;
};

extern cpumask_t asym_cap_sibling_cpus;
@@ -2149,6 +2148,7 @@ struct sched_walt_cpu_load {
	unsigned long prev_window_util;
	unsigned long nl;
	unsigned long pl;
	bool rtgb_active;
	u64 ws;
};

@@ -2176,6 +2176,7 @@ extern unsigned long boosted_cpu_util(int cpu, unsigned long other_util,
u64 freq_policy_load(struct rq *rq);

extern u64 walt_load_reported_window;
extern bool rtgb_active;

static inline unsigned long
__cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
@@ -2209,6 +2210,7 @@ __cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
		walt_load->nl = nl;
		walt_load->pl = pl;
		walt_load->ws = walt_load_reported_window;
		walt_load->rtgb_active = rtgb_active;
	}

	return (util >= capacity) ? capacity : util;
@@ -3129,7 +3131,6 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
	return policy;
}

extern void walt_map_freq_to_load(void);
extern void walt_update_min_max_capacity(void);

static inline bool is_min_capacity_cluster(struct sched_cluster *cluster)
@@ -3283,7 +3284,6 @@ static inline unsigned int power_cost(int cpu, u64 demand)
#endif

static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
static inline void walt_map_freq_to_load(void) { }
static inline void walt_update_min_max_capacity(void) { }
#endif	/* CONFIG_SCHED_WALT */

Loading