Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b670ccd7 authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched/walt: eval_need code and trace cleanup"

parents 64a2546e 40770d9b
Loading
Loading
Loading
Loading
+9 −8
Original line number | Diff line number | Diff line
@@ -182,7 +182,7 @@ static int sched_effective_boost(void)
static void sched_boost_disable(int type)
{
	struct sched_boost_data *sb = &sched_boosts[type];
	int next_boost;
	int next_boost, prev_boost = sched_boost_type;

	if (sb->refcount <= 0)
		return;
@@ -192,14 +192,15 @@ static void sched_boost_disable(int type)
	if (sb->refcount)
		return;

	next_boost = sched_effective_boost();
	if (next_boost == prev_boost)
		return;
	/*
	 * This boost's refcount becomes zero, so it must
	 * be disabled. Disable it first and then apply
	 * the next boost.
	 */
	sb->exit();

	next_boost = sched_effective_boost();
	sched_boosts[prev_boost].exit();
	sched_boosts[next_boost].enter();
}

@@ -232,14 +233,14 @@ static void sched_boost_enable(int type)
static void sched_boost_disable_all(void)
{
	int i;
	int prev_boost = sched_boost_type;

	for (i = SCHED_BOOST_START; i < SCHED_BOOST_END; i++) {
		if (sched_boosts[i].refcount > 0) {
			sched_boosts[i].exit();
	if (prev_boost != NO_BOOST) {
		sched_boosts[prev_boost].exit();
		for (i = SCHED_BOOST_START; i < SCHED_BOOST_END; i++)
			sched_boosts[i].refcount = 0;
	}
}
}

static void _sched_set_boost(int type)
{
+14 −11
Original line number | Diff line number | Diff line
@@ -796,8 +796,8 @@ static bool eval_need(struct cluster_data *cluster)
	unsigned long flags;
	struct cpu_data *c;
	unsigned int need_cpus = 0, last_need, thres_idx;
	int ret = 0;
	bool need_flag = false;
	bool adj_now = false;
	bool adj_possible = false;
	unsigned int new_need;
	s64 now, elapsed;

@@ -827,13 +827,12 @@ static bool eval_need(struct cluster_data *cluster)
		need_cpus = apply_task_need(cluster, need_cpus);
	}
	new_need = apply_limits(cluster, need_cpus);
	need_flag = adjustment_possible(cluster, new_need);

	last_need = cluster->need_cpus;
	now = ktime_to_ms(ktime_get());

	if (new_need > cluster->active_cpus) {
		ret = 1;
		adj_now = true;
	} else {
		/*
		 * When there is no change in need and there are no more
@@ -842,23 +841,27 @@ static bool eval_need(struct cluster_data *cluster)
		 */
		if (new_need == last_need && new_need == cluster->active_cpus) {
			cluster->need_ts = now;
			spin_unlock_irqrestore(&state_lock, flags);
			return 0;
			adj_now = false;
			goto unlock;
		}

		elapsed = now - cluster->need_ts;
		ret = elapsed >= cluster->offline_delay_ms;
		adj_now = elapsed >= cluster->offline_delay_ms;
	}

	if (ret) {
	if (adj_now) {
		adj_possible = adjustment_possible(cluster, new_need);
		cluster->need_ts = now;
		cluster->need_cpus = new_need;
	}

unlock:
	trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need,
				 ret && need_flag);
				 cluster->active_cpus, adj_now, adj_possible,
				 adj_now && adj_possible, cluster->need_ts);
	spin_unlock_irqrestore(&state_lock, flags);

	return ret && need_flag;
	return adj_now && adj_possible;
}

static void apply_need(struct cluster_data *cluster)
+23 −11
Original line number | Diff line number | Diff line
@@ -437,23 +437,35 @@ TRACE_EVENT(sched_load_to_gov,

TRACE_EVENT(core_ctl_eval_need,

	TP_PROTO(unsigned int cpu, unsigned int old_need,
		unsigned int new_need, unsigned int updated),
	TP_ARGS(cpu, old_need, new_need, updated),
	TP_PROTO(unsigned int cpu, unsigned int last_need,
		unsigned int new_need, unsigned int active_cpus,
		unsigned int adj_now, unsigned int adj_possible,
		unsigned int updated, s64 need_ts),
	TP_ARGS(cpu, last_need, new_need, active_cpus, adj_now, adj_possible, updated, need_ts),
	TP_STRUCT__entry(
		__field(u32, cpu)
		__field(u32, old_need)
		__field(u32, last_need)
		__field(u32, new_need)
		__field(u32, active_cpus)
		__field(u32, adj_now)
		__field(u32, adj_possible)
		__field(u32, updated)
		__field(s64, need_ts)
	),
	TP_fast_assign(
		__entry->cpu		= cpu;
		__entry->old_need = old_need;
		__entry->last_need	= last_need;
		__entry->new_need	= new_need;
		__entry->active_cpus	= active_cpus;
		__entry->adj_now	= adj_now;
		__entry->adj_possible	= adj_possible;
		__entry->updated	= updated;
		__entry->need_ts	= need_ts;
	),
	TP_printk("cpu=%u, old_need=%u, new_need=%u, updated=%u", __entry->cpu,
			__entry->old_need, __entry->new_need, __entry->updated)
	TP_printk("cpu=%u last_need=%u new_need=%u active_cpus=%u adj_now=%u adj_possible=%u updated=%u need_ts=%llu",
		  __entry->cpu,	__entry->last_need, __entry->new_need,
		  __entry->active_cpus, __entry->adj_now, __entry->adj_possible,
		  __entry->updated, __entry->need_ts)
);

TRACE_EVENT(core_ctl_set_busy,