Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8ee80f5d authored by Junjie Wu
Browse files

cpufreq: interactive: Pass target_load to scheduler



The scheduler needs to know the governor's target_load in order to make
correct decisions when scheduling tasks.

Change-Id: Ia440986de813632def0352e34425fa69da3b2923
Signed-off-by: Junjie Wu <junjiew@codeaurora.org>
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
parent bcd02521
Loading
Loading
Loading
Loading
+33 −0
Original line number Diff line number Diff line
@@ -76,6 +76,7 @@ static struct mutex gov_lock;
static int set_window_count;
static int migration_register_count;
static struct mutex sched_lock;
static cpumask_t controlled_cpus;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
@@ -291,6 +292,25 @@ static unsigned int freq_to_targetload(
	return ret;
}

/* Load value reported when no interactive tunables apply to a CPU. */
#define DEFAULT_MAX_LOAD 100

/*
 * get_freq_max_load - report the governor's target load for @freq on @cpu.
 *
 * Exposes the interactive governor's per-frequency target load to the
 * scheduler.  Returns DEFAULT_MAX_LOAD whenever @cpu is not governed by
 * interactive or no cached tunables are available for it.
 */
u32 get_freq_max_load(int cpu, unsigned int freq)
{
	struct cpufreq_interactive_policyinfo *ppol;

	/* CPUs outside controlled_cpus are not run by this governor. */
	if (!cpumask_test_cpu(cpu, &controlled_cpus))
		return DEFAULT_MAX_LOAD;

	if (!have_governor_per_policy()) {
		/* Single shared tunable set for all policies. */
		if (!cached_common_tunables)
			return DEFAULT_MAX_LOAD;
		return freq_to_targetload(cached_common_tunables, freq);
	}

	/* Per-policy tunables: look them up through this CPU's policy info. */
	ppol = per_cpu(polinfo, cpu);
	if (ppol && ppol->cached_tunables)
		return freq_to_targetload(ppol->cached_tunables, freq);

	return DEFAULT_MAX_LOAD;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
@@ -852,6 +872,9 @@ static ssize_t store_target_loads(
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);

	sched_update_freq_max_load(&controlled_cpus);

	return count;
}

@@ -1549,6 +1572,9 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			cpumask_or(&controlled_cpus, &controlled_cpus,
				   policy->related_cpus);
			sched_update_freq_max_load(policy->related_cpus);
			policy->governor_data = tunables;
			return 0;
		}
@@ -1586,6 +1612,10 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		if (tunables->use_sched_load)
			cpufreq_interactive_enable_sched_input(tunables);

		cpumask_or(&controlled_cpus, &controlled_cpus,
			   policy->related_cpus);
		sched_update_freq_max_load(policy->related_cpus);

		if (have_governor_per_policy())
			ppol->cached_tunables = tunables;
		else
@@ -1594,6 +1624,9 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		cpumask_andnot(&controlled_cpus, &controlled_cpus,
			       policy->related_cpus);
		sched_update_freq_max_load(cpu_possible_mask);
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1)
				cpufreq_unregister_notifier(&cpufreq_notifier_block,