
Commit 4447266b authored by Viresh Kumar, committed by Rafael J. Wysocki

cpufreq: governors: Remove code redundancy between governors



With the inclusion of the following patches:

9f4eb10 cpufreq: conservative: call dbs_check_cpu only when necessary
772b4b1 cpufreq: ondemand: call dbs_check_cpu only when necessary

code redundancy between the conservative and ondemand governors is
introduced again, so get rid of it.

[rjw: Changelog]
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Tested-by: Fabio Baltieri <fabio.baltieri@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 8eeed095
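
Note on the shape of the change: both governors had grown an identical "coordinated timer" path, which for a policy shared by several CPUs skips the load evaluation if it last ran less than half a sampling period ago. This commit hoists that check into a single need_load_eval() helper in the common governor code. Below is a minimal standalone sketch of the pattern in userspace C; struct dbs_common, now_us() and the field names are simplified stand-ins for the kernel's cpu_dbs_common_info and ktime_get(), not the real kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

struct dbs_common {
	bool policy_shared;	/* several CPUs governed by one policy */
	int64_t time_stamp_us;	/* time of the last load evaluation */
};

static int64_t now_us(void)	/* stand-in for the kernel's ktime_get() */
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/*
 * Mirrors need_load_eval(): for a shared policy, evaluate load at most
 * once per half sampling period; a single-CPU policy always evaluates.
 */
static bool need_load_eval(struct dbs_common *cdbs,
			   unsigned int sampling_rate_us)
{
	if (cdbs->policy_shared) {
		int64_t time_now = now_us();
		int64_t delta_us = time_now - cdbs->time_stamp_us;

		if (delta_us < (int64_t)(sampling_rate_us / 2))
			return false;	/* sampled too recently, skip */
		cdbs->time_stamp_us = time_now;
	}
	return true;
}

Each governor's timer body then reduces to lock, need_load_eval(), evaluate, re-arm, unlock, which is exactly the shape of the new cs_dbs_timer() and od_dbs_timer() in the diffs below.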
drivers/cpufreq/cpufreq_conservative.c +9 −43
@@ -111,58 +111,24 @@ static void cs_check_cpu(int cpu, unsigned int load)
 	}
 }
 
-static void cs_timer_update(struct cs_cpu_dbs_info_s *dbs_info, bool sample,
-			    struct delayed_work *dw)
+static void cs_dbs_timer(struct work_struct *work)
 {
+	struct delayed_work *dw = to_delayed_work(work);
+	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
+			struct cs_cpu_dbs_info_s, cdbs.work.work);
 	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
+			cpu);
 	int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
 
-	if (sample)
+	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+	if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate))
 		dbs_check_cpu(&cs_dbs_data, cpu);
 
 	schedule_delayed_work_on(smp_processor_id(), dw, delay);
+	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
-static void cs_timer_coordinated(struct cs_cpu_dbs_info_s *dbs_info_local,
-				 struct delayed_work *dw)
-{
-	struct cs_cpu_dbs_info_s *dbs_info;
-	ktime_t time_now;
-	s64 delta_us;
-	bool sample = true;
-
-	/* use leader CPU's dbs_info */
-	dbs_info = &per_cpu(cs_cpu_dbs_info,
-			    dbs_info_local->cdbs.cur_policy->cpu);
-	mutex_lock(&dbs_info->cdbs.timer_mutex);
-
-	time_now = ktime_get();
-	delta_us = ktime_us_delta(time_now, dbs_info->cdbs.time_stamp);
-
-	/* Do nothing if we recently have sampled */
-	if (delta_us < (s64)(cs_tuners.sampling_rate / 2))
-		sample = false;
-	else
-		dbs_info->cdbs.time_stamp = time_now;
-
-	cs_timer_update(dbs_info, sample, dw);
-	mutex_unlock(&dbs_info->cdbs.timer_mutex);
-}
-
-static void cs_dbs_timer(struct work_struct *work)
-{
-	struct delayed_work *dw = to_delayed_work(work);
-	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
-			struct cs_cpu_dbs_info_s, cdbs.work.work);
-
-	if (policy_is_shared(dbs_info->cdbs.cur_policy)) {
-		cs_timer_coordinated(dbs_info, dw);
-	} else {
-		mutex_lock(&dbs_info->cdbs.timer_mutex);
-		cs_timer_update(dbs_info, true, dw);
-		mutex_unlock(&dbs_info->cdbs.timer_mutex);
-	}
-}
 static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		void *data)
 {
drivers/cpufreq/cpufreq_governor.c +19 −0
@@ -177,6 +177,25 @@ static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu)
 	cancel_delayed_work_sync(&cdbs->work);
 }
 
+/* Will return if we need to evaluate cpu load again or not */
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+		unsigned int sampling_rate)
+{
+	if (policy_is_shared(cdbs->cur_policy)) {
+		ktime_t time_now = ktime_get();
+		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
+
+		/* Do nothing if we recently have sampled */
+		if (delta_us < (s64)(sampling_rate / 2))
+			return false;
+		else
+			cdbs->time_stamp = time_now;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(need_load_eval);
+
 int cpufreq_governor_dbs(struct dbs_data *dbs_data,
 		struct cpufreq_policy *policy, unsigned int event)
 {
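
To put numbers on the check above: with a sampling_rate of, say, 10000 µs, a timer that fires on one CPU of a shared policy within 5000 µs of the last evaluation gets false back and only re-arms itself; once at least half the sampling period has elapsed, the timestamp is refreshed and the load is evaluated. A policy owning a single CPU always evaluates, since no other CPU's timer can race with it there.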
drivers/cpufreq/cpufreq_governor.h +2 −0
@@ -171,6 +171,8 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate)
 
 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+		unsigned int sampling_rate);
 int cpufreq_governor_dbs(struct dbs_data *dbs_data,
 		struct cpufreq_policy *policy, unsigned int event);
 #endif /* _CPUFREQ_GOVERNER_H */
drivers/cpufreq/cpufreq_ondemand.c +23 −54
@@ -216,75 +216,44 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
 	}
 }
 
-static void od_timer_update(struct od_cpu_dbs_info_s *dbs_info, bool sample,
-			    struct delayed_work *dw)
+static void od_dbs_timer(struct work_struct *work)
 {
+	struct delayed_work *dw = to_delayed_work(work);
+	struct od_cpu_dbs_info_s *dbs_info =
+		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
 	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-	int delay, sample_type = dbs_info->sample_type;
+	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
+			cpu);
+	int delay, sample_type = core_dbs_info->sample_type;
+	bool eval_load;
+
+	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+	eval_load = need_load_eval(&core_dbs_info->cdbs,
+			od_tuners.sampling_rate);
 
 	/* Common NORMAL_SAMPLE setup */
-	dbs_info->sample_type = OD_NORMAL_SAMPLE;
+	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
 	if (sample_type == OD_SUB_SAMPLE) {
-		delay = dbs_info->freq_lo_jiffies;
-		if (sample)
-			__cpufreq_driver_target(dbs_info->cdbs.cur_policy,
-						dbs_info->freq_lo,
+		delay = core_dbs_info->freq_lo_jiffies;
+		if (eval_load)
+			__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
+						core_dbs_info->freq_lo,
 						CPUFREQ_RELATION_H);
 	} else {
-		if (sample)
+		if (eval_load)
 			dbs_check_cpu(&od_dbs_data, cpu);
-		if (dbs_info->freq_lo) {
+		if (core_dbs_info->freq_lo) {
 			/* Setup timer for SUB_SAMPLE */
-			dbs_info->sample_type = OD_SUB_SAMPLE;
-			delay = dbs_info->freq_hi_jiffies;
+			core_dbs_info->sample_type = OD_SUB_SAMPLE;
+			delay = core_dbs_info->freq_hi_jiffies;
 		} else {
 			delay = delay_for_sampling_rate(od_tuners.sampling_rate
-						* dbs_info->rate_mult);
+						* core_dbs_info->rate_mult);
 		}
 	}
 
 	schedule_delayed_work_on(smp_processor_id(), dw, delay);
-}
-
-static void od_timer_coordinated(struct od_cpu_dbs_info_s *dbs_info_local,
-				 struct delayed_work *dw)
-{
-	struct od_cpu_dbs_info_s *dbs_info;
-	ktime_t time_now;
-	s64 delta_us;
-	bool sample = true;
-
-	/* use leader CPU's dbs_info */
-	dbs_info = &per_cpu(od_cpu_dbs_info,
-			    dbs_info_local->cdbs.cur_policy->cpu);
-	mutex_lock(&dbs_info->cdbs.timer_mutex);
-
-	time_now = ktime_get();
-	delta_us = ktime_us_delta(time_now, dbs_info->cdbs.time_stamp);
-
-	/* Do nothing if we recently have sampled */
-	if (delta_us < (s64)(od_tuners.sampling_rate / 2))
-		sample = false;
-	else
-		dbs_info->cdbs.time_stamp = time_now;
-
-	od_timer_update(dbs_info, sample, dw);
-	mutex_unlock(&dbs_info->cdbs.timer_mutex);
-}
-
-static void od_dbs_timer(struct work_struct *work)
-{
-	struct delayed_work *dw = to_delayed_work(work);
-	struct od_cpu_dbs_info_s *dbs_info =
-		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
-
-	if (policy_is_shared(dbs_info->cdbs.cur_policy)) {
-		od_timer_coordinated(dbs_info, dw);
-	} else {
-		mutex_lock(&dbs_info->cdbs.timer_mutex);
-		od_timer_update(dbs_info, true, dw);
-		mutex_unlock(&dbs_info->cdbs.timer_mutex);
-	}
+	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
 /************************** sysfs interface ************************/