
Commit d9ff0d77 authored by Joonwoo Park, committed by Jeevan Shriram

sched: simplify CPU frequency estimation and cycle counter API



Most CPUs increment their cycle counter by one every cycle, so
frequency = cycles / time_delta holds as is.  It is therefore
reasonable to get rid of the current cpu_cycle_max_scale_factor and
instead require the cycle counter read callback to return an
already-scaled counter value in the case where the counter does not
advance once per cycle.

Accordingly, since we calculate frequency in KHz, multiply the CPU
cycle counter delta by NSEC_PER_SEC / HZ_PER_KHZ and remove
cpu_cycle_max_scale_factor.  This simplifies both the frequency
estimation and the cycle counter API.
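
A minimal sketch of the arithmetic, as plain userspace C (the helper
name is hypothetical, not part of this commit): a counter that advances
once per cycle gives cycles/ns, i.e. GHz, and one GHz is 10^6 KHz --
exactly NSEC_PER_SEC / HZ_PER_KHZ, which equals NSEC_PER_MSEC.

#include <stdint.h>

#define NSEC_PER_MSEC 1000000ULL	/* == NSEC_PER_SEC / HZ_PER_KHZ */

/* Illustrative only: frequency in KHz from a raw cycle delta. */
static uint64_t cycles_to_khz(uint64_t cycles, uint64_t time_delta_ns)
{
	return (cycles * NSEC_PER_MSEC) / time_delta_ns;
}

/* e.g. 2,000,000 cycles over 1,000,000 ns (1 ms) -> 2,000,000 KHz = 2 GHz */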

Change-Id: Ie7a628d4bc77c9b6c769f6099ce8d75740262a14
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
parent 167eebb0
+0 −1
@@ -3370,7 +3370,6 @@ static inline unsigned long rlimit_max(unsigned int limit)
 
 struct cpu_cycle_counter_cb {
 	u64 (*get_cpu_cycle_counter)(int cpu);
-	u32 (*get_cpu_cycles_max_per_us)(int cpu);
 };
 int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
 
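With get_cpu_cycles_max_per_us gone, a registrant supplies only the
read hook.  A hedged sketch of driver-side registration under the
simplified API (my_read_cycle_counter and read_hw_counter are
hypothetical names, not from this commit):

/* Hypothetical driver: the callback must return a count that
 * advances once per CPU cycle, pre-scaling its hardware counter
 * itself if the hardware ticks at some other rate. */
static u64 my_read_cycle_counter(int cpu)
{
	return read_hw_counter(cpu);	/* assumed hardware accessor */
}

static struct cpu_cycle_counter_cb my_cb = {
	.get_cpu_cycle_counter = my_read_cycle_counter,
};

static int __init my_counter_init(void)
{
	return register_cpu_cycle_counter_cb(&my_cb);
}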
+3 −29
@@ -1403,7 +1403,6 @@ static struct sched_cluster init_cluster = {
 	.max_mitigated_freq	=	UINT_MAX,
 	.min_freq		=	1,
 	.max_possible_freq	=	1,
-	.cpu_cycle_max_scale_factor	= 1,
 	.dstate			=	0,
 	.dstate_wakeup_energy	=	0,
 	.dstate_wakeup_latency	=	0,
@@ -1553,7 +1552,6 @@ static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
 	cluster->max_mitigated_freq	=	UINT_MAX;
 	cluster->min_freq		=	1;
 	cluster->max_possible_freq	=	1;
-	cluster->cpu_cycle_max_scale_factor =	1;
 	cluster->dstate			=	0;
 	cluster->dstate_wakeup_energy	=	0;
 	cluster->dstate_wakeup_latency	=	0;
@@ -1620,38 +1618,15 @@ static void init_clusters(void)
 	INIT_LIST_HEAD(&cluster_head);
 }
 
-static inline void
-__update_cpu_cycle_max_possible_freq(struct sched_cluster *cluster)
-{
-	int cpu = cluster_first_cpu(cluster);
-
-	cluster->cpu_cycle_max_scale_factor =
-	    div64_u64(cluster->max_possible_freq * NSEC_PER_USEC,
-		      cpu_cycle_counter_cb.get_cpu_cycles_max_per_us(cpu));
-}
-
-static inline void
-update_cpu_cycle_max_possible_freq(struct sched_cluster *cluster)
-{
-	if (!use_cycle_counter)
-		return;
-
-	__update_cpu_cycle_max_possible_freq(cluster);
-}
-
 int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
 {
-	struct sched_cluster *cluster = NULL;
-
 	mutex_lock(&cluster_lock);
-	if (!cb->get_cpu_cycle_counter || !cb->get_cpu_cycles_max_per_us) {
+	if (!cb->get_cpu_cycle_counter) {
 		mutex_unlock(&cluster_lock);
 		return -EINVAL;
 	}
 
 	cpu_cycle_counter_cb = *cb;
-	for_each_sched_cluster(cluster)
-		__update_cpu_cycle_max_possible_freq(cluster);
 	use_cycle_counter = true;
 	mutex_unlock(&cluster_lock);
 
@@ -1931,8 +1906,7 @@ static inline u64 scale_exec_time(u64 delta, struct rq *rq,
 	int cpu = cpu_of(rq);
 	int sf;
 
-	delta = DIV64_U64_ROUNDUP(delta * cc->cycles *
-				  cpu_cycle_max_scale_factor(cpu),
+	delta = DIV64_U64_ROUNDUP(delta * cc->cycles,
 				  max_possible_freq * cc->time);
 	sf = DIV_ROUND_UP(cpu_efficiency(cpu) * 1024, max_possible_efficiency);
 
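The cpu_cycle_max_scale_factor(cpu) term drops out here because
cc->cycles now arrives pre-scaled by NSEC_PER_MSEC (see the
get_task_cpu_cycles hunk below), leaving a plain frequency ratio.
A sketch of the resulting math, with illustrative values:

/*
 * cc->cycles / cc->time is the estimated current frequency in KHz
 * once the cycle delta has been multiplied by NSEC_PER_MSEC, so:
 *
 *   delta' = delta * (cc->cycles / cc->time) / max_possible_freq
 *          = delta * f_cur / f_max
 *
 * e.g. delta = 1000 ns at f_cur = 1,000,000 KHz (1 GHz) on a
 * cluster with max_possible_freq = 2,000,000 KHz (2 GHz) scales
 * to 500 ns, before the efficiency factor sf is applied.
 */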
@@ -2613,6 +2587,7 @@ get_task_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
 		cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles);
 	else
 		cc.cycles = cur_cycles - p->cpu_cycles;
+	cc.cycles = cc.cycles * NSEC_PER_MSEC;
 	cc.time = wallclock - p->ravg.mark_start;
 	BUG_ON((s64)cc.time < 0);
 
@@ -3688,7 +3663,6 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
 
 			sort_clusters();
 			update_all_clusters_stats();
-			update_cpu_cycle_max_possible_freq(cluster);
 			mutex_unlock(&cluster_lock);
 			continue;
 		}
+1 −11
@@ -389,11 +389,6 @@ struct sched_cluster {
 	 */
 	unsigned int cur_freq, max_freq, max_mitigated_freq, min_freq;
 	unsigned int max_possible_freq;
-	/*
-	 * cpu_cycle_max_scale_factor represents number of cycles per NSEC at
-	 * CPU's fmax.
-	 */
-	u32 cpu_cycle_max_scale_factor;
 	bool freq_init_done;
 	int dstate, dstate_wakeup_latency, dstate_wakeup_energy;
 	unsigned int static_cluster_pwr_cost;
@@ -1137,14 +1132,9 @@ static inline int cpu_max_power_cost(int cpu)
 	return cpu_rq(cpu)->cluster->max_power_cost;
 }
 
-static inline int cpu_cycle_max_scale_factor(int cpu)
-{
-	return cpu_rq(cpu)->cluster->cpu_cycle_max_scale_factor;
-}
-
 static inline u32 cpu_cycles_to_freq(int cpu, u64 cycles, u32 period)
 {
-	return div64_u64(cycles * cpu_cycle_max_scale_factor(cpu), period);
+	return div64_u64(cycles, period);
 }
 
 static inline bool hmp_capable(void)
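
Callers are now expected to hand this helper a cycle delta that has
already been multiplied by NSEC_PER_MSEC (as get_task_cpu_cycles does
above), so it reduces to a plain division.  A hedged usage sketch with
illustrative values:

/* 2,000,000 raw cycles over a 1,000,000 ns window:
 *   u64 cycles = 2000000ULL * NSEC_PER_MSEC;        // pre-scaled
 *   u32 khz = cpu_cycles_to_freq(cpu, cycles, 1000000);
 * khz == 2,000,000, i.e. 2 GHz.
 */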