Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 18204df0, authored by qctecmdr Service and committed by Gerrit (the friendly Code Review server)
Browse files

Merge "cpufreq: schedutil: Add cpu_frequency traces for each cpu in the policy"

parents 893fdac4 714e43c8
Loading
Loading
Loading
Loading
+21 −7
Original line number Diff line number Diff line
@@ -123,6 +123,15 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
	return delta_ns >= sg_policy->min_rate_limit_ns;
}

/*
 * Decide whether PELT utilisation signals should be used.
 *
 * Returns true when the PELT path applies: either the kernel is built
 * without WALT support, or WALT is present but switched off (via the
 * sysctl_sched_use_walt_cpu_util knob or the walt_disabled flag).
 */
static inline bool use_pelt(void)
{
#ifdef CONFIG_SCHED_WALT
	/* WALT is actively in use only when enabled and not disabled. */
	if (sysctl_sched_use_walt_cpu_util && !walt_disabled)
		return false;
	return true;
#else
	/* No WALT support compiled in; PELT is the only option. */
	return true;
#endif
}

static bool sugov_up_down_rate_limit(struct sugov_policy *sg_policy, u64 time,
				     unsigned int next_freq)
{
@@ -160,6 +169,7 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
			      unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int cpu;

	if (!sugov_update_next_freq(sg_policy, time, next_freq))
		return;
@@ -169,6 +179,7 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
		return;

	policy->cur = next_freq;
	for_each_cpu(cpu, policy->cpus)
		trace_cpu_frequency(next_freq, smp_processor_id());
}

@@ -178,6 +189,7 @@ static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
	if (!sugov_update_next_freq(sg_policy, time, next_freq))
		return;

	if (use_pelt())
		sg_policy->work_in_progress = true;
	irq_work_queue(&sg_policy->irq_work);
}
@@ -335,7 +347,7 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
	sg_cpu->max = max;
	sg_cpu->bw_dl = cpu_bw_dl(rq);

	return cpu_util_freq(sg_cpu->cpu, &sg_cpu->walt_load);
	return boosted_cpu_util(sg_cpu->cpu, 0, &sg_cpu->walt_load);
#else
	unsigned long util = boosted_cpu_util(sg_cpu->cpu, cpu_util_rt(rq));

@@ -606,13 +618,14 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
		sg_policy->hispeed_util = hs_util;
	}

	sugov_iowait_apply(sg_cpu, time, &util, &max);
	sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
			   sg_policy->policy->cur);

	trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util,
				sg_policy->avg_cap, max, sg_cpu->walt_load.nl,
				sg_cpu->walt_load.pl, flags);

	sugov_iowait_apply(sg_cpu, time, &util, &max);
	sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
			   sg_policy->policy->cur);
	sugov_walt_adjust(sg_cpu, &util, &max);
	next_f = get_next_freq(sg_policy, util, max);
	/*
@@ -753,6 +766,7 @@ static void sugov_work(struct kthread_work *work)
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	if (use_pelt())
		sg_policy->work_in_progress = false;
	sugov_track_cycles(sg_policy, sg_policy->policy->cur,
			   ktime_get_ns());
+3 −2
Original line number Diff line number Diff line
@@ -6005,9 +6005,10 @@ schedtune_task_margin(struct task_struct *task)
}

unsigned long
boosted_cpu_util(int cpu, unsigned long other_util)
boosted_cpu_util(int cpu, unsigned long other_util,
		 struct sched_walt_cpu_load *walt_load)
{
	unsigned long util = cpu_util_cfs(cpu_rq(cpu)) + other_util;
	unsigned long util = cpu_util_freq(cpu, walt_load) + other_util;
	long margin = schedtune_cpu_margin(util, cpu);

	trace_sched_boost_cpu(cpu, util, margin);
+5 −3
Original line number Diff line number Diff line
@@ -2073,8 +2073,10 @@ static inline unsigned long cpu_util_cum(int cpu, int delta)
	return (delta >= capacity) ? capacity : delta;
}


#ifdef CONFIG_SCHED_WALT
extern unsigned long boosted_cpu_util(int cpu, unsigned long other_util,
				      struct sched_walt_cpu_load *walt_load);

u64 freq_policy_load(struct rq *rq);

extern u64 walt_load_reported_window;
@@ -2559,7 +2561,7 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
					cpu_of(rq)));
	if (data)
		data->func(data, rq_clock(rq), flags);
		data->func(data, sched_ktime_clock(), flags);
}
#else
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
+0 −2
Original line number Diff line number Diff line
@@ -20,8 +20,6 @@ int schedtune_prefer_idle(struct task_struct *tsk);
void schedtune_enqueue_task(struct task_struct *p, int cpu);
void schedtune_dequeue_task(struct task_struct *p, int cpu);

unsigned long boosted_cpu_util(int cpu, unsigned long other_util);

#else /* CONFIG_SCHED_TUNE */

#define schedtune_cpu_boost(cpu)  0