Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fa1902ab authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "cpufreq: interactive: Pass target_load to scheduler"

parents 62c3481f 8ee80f5d
Loading
Loading
Loading
Loading
+33 −0
Original line number Diff line number Diff line
@@ -76,6 +76,7 @@ static struct mutex gov_lock;
static int set_window_count;
static int migration_register_count;
static struct mutex sched_lock;
static cpumask_t controlled_cpus;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
@@ -291,6 +292,25 @@ static unsigned int freq_to_targetload(
	return ret;
}

#define DEFAULT_MAX_LOAD 100
u32 get_freq_max_load(int cpu, unsigned int freq)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);

	if (!cpumask_test_cpu(cpu, &controlled_cpus))
		return DEFAULT_MAX_LOAD;

	if (have_governor_per_policy()) {
		if (!ppol || !ppol->cached_tunables)
			return DEFAULT_MAX_LOAD;
		return freq_to_targetload(ppol->cached_tunables, freq);
	}

	if (!cached_common_tunables)
		return DEFAULT_MAX_LOAD;
	return freq_to_targetload(cached_common_tunables, freq);
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
@@ -852,6 +872,9 @@ static ssize_t store_target_loads(
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);

	sched_update_freq_max_load(&controlled_cpus);

	return count;
}

@@ -1549,6 +1572,9 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			cpumask_or(&controlled_cpus, &controlled_cpus,
				   policy->related_cpus);
			sched_update_freq_max_load(policy->related_cpus);
			policy->governor_data = tunables;
			return 0;
		}
@@ -1586,6 +1612,10 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		if (tunables->use_sched_load)
			cpufreq_interactive_enable_sched_input(tunables);

		cpumask_or(&controlled_cpus, &controlled_cpus,
			   policy->related_cpus);
		sched_update_freq_max_load(policy->related_cpus);

		if (have_governor_per_policy())
			ppol->cached_tunables = tunables;
		else
@@ -1594,6 +1624,9 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		cpumask_andnot(&controlled_cpus, &controlled_cpus,
			       policy->related_cpus);
		sched_update_freq_max_load(cpu_possible_mask);
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1)
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
+6 −0
Original line number Diff line number Diff line
@@ -1966,6 +1966,7 @@ extern unsigned long sched_get_busy(int cpu);
extern void sched_get_cpus_busy(unsigned long *busy,
				const struct cpumask *query_cpus);
extern void sched_set_io_is_busy(int val);
/* Rebuild per-CPU max-load-capped frequency tables for @cpumask. */
extern int sched_update_freq_max_load(const cpumask_t *cpumask);
#else
static inline int sched_set_window(u64 window_start, unsigned int window_size)
{
@@ -1978,6 +1979,11 @@ static inline unsigned long sched_get_busy(int cpu)
/*
 * No-op stubs when CONFIG_SCHED_FREQ_INPUT is disabled.
 * Note: no semicolon after a function-body brace at file scope —
 * the stray ';' is rejected by strict ISO C (-pedantic).
 */
static inline void sched_get_cpus_busy(unsigned long *busy,
				const struct cpumask *query_cpus) { }
static inline void sched_set_io_is_busy(int val) { }

/* No-op when CONFIG_SCHED_FREQ_INPUT is disabled; reports success. */
static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
{
	return 0;
}
#endif

/*
+94 −1
Original line number Diff line number Diff line
@@ -1206,7 +1206,6 @@ __read_mostly int sysctl_sched_freq_inc_notify = 10 * 1024 * 1024; /* + 10GHz */
__read_mostly int sysctl_sched_freq_dec_notify = 10 * 1024 * 1024; /* - 10GHz */

static __read_mostly unsigned int sched_io_is_busy;

#endif	/* CONFIG_SCHED_FREQ_INPUT */

/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
@@ -1631,6 +1630,78 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,

	BUG();
}

/*
 * Weak default used when no cpufreq governor overrides it (the
 * interactive governor provides a strong definition).
 */
u32 __weak get_freq_max_load(int cpu, u32 freq)
{
	/* 100% load allowed, i.e. frequencies are never capped. */
	const u32 uncapped = 100;

	return uncapped;
}

DEFINE_PER_CPU(struct freq_max_load *, freq_max_load);

/*
 * sched_update_freq_max_load - rebuild per-CPU freq_max_load tables.
 * @cpumask: CPUs whose tables should be recomputed.
 *
 * For each CPU in @cpumask, build a table of frequencies scaled by the
 * governor-supplied max load (get_freq_max_load()) and publish it via
 * RCU for power_cost_at_freq() readers.  Writers are serialized by
 * policy_mutex; readers use rcu_read_lock().
 *
 * Returns 0 on success or a negative errno.  On failure the tables of
 * ALL CPUs in @cpumask are dropped, so readers see NULL and fall back
 * to uncapped behaviour.
 */
int sched_update_freq_max_load(const cpumask_t *cpumask)
{
	int i, cpu, ret;
	unsigned int freq, max;
	struct cpu_pstate_pwr *costs;
	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
	struct freq_max_load *max_load, *old_max_load;

	/* Nothing to do without power tables or power-aware scheduling. */
	if (!per_cpu_info || !sysctl_sched_enable_power_aware)
		return 0;

	mutex_lock(&policy_mutex);
	for_each_cpu(cpu, cpumask) {
		if (!per_cpu_info[cpu].ptable) {
			ret = -EINVAL;
			goto fail;
		}

		old_max_load = rcu_dereference(per_cpu(freq_max_load, cpu));

		/*
		 * allocate len + 1 entries and leave the last one zeroed so
		 * power_cost_at_freq() stops iterating even when
		 * per_cpu_info[cpu].len > len of max_load due to a race
		 * between a cpu power stats update and get_cpu_pwr_stats().
		 */
		max_load = kzalloc(sizeof(struct freq_max_load) +
				   sizeof(u32) * (per_cpu_info[cpu].len + 1),
				   GFP_ATOMIC);
		if (unlikely(!max_load)) {
			ret = -ENOMEM;
			goto fail;
		}

		/* Scale each OPP's frequency by its max load percentage. */
		i = 0;
		costs = per_cpu_info[cpu].ptable;
		while (costs[i].freq) {
			freq = costs[i].freq;
			max = get_freq_max_load(cpu, freq);
			max_load->freqs[i] = div64_u64((u64)freq * max, 100);
			i++;
		}

		/* Publish the new table; free the old one after a grace period. */
		rcu_assign_pointer(per_cpu(freq_max_load, cpu), max_load);
		if (old_max_load)
			kfree_rcu(old_max_load, rcu);
	}

	mutex_unlock(&policy_mutex);
	return 0;

fail:
	/* Tear down every table in @cpumask, including ones just built. */
	for_each_cpu(cpu, cpumask) {
		max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
		if (max_load) {
			rcu_assign_pointer(per_cpu(freq_max_load, cpu), NULL);
			kfree_rcu(max_load, rcu);
		}
	}

	mutex_unlock(&policy_mutex);
	return ret;
}
#else	/* CONFIG_SCHED_FREQ_INPUT */

static inline void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
@@ -2601,6 +2672,17 @@ static int cpufreq_notifier_trans(struct notifier_block *nb,
	return 0;
}

/*
 * Called when a CPU's power stats become available; rebuild the
 * freq_max_load table for just that CPU.
 */
static int pwr_stats_ready_notifier(struct notifier_block *nb,
				    unsigned long cpu, void *data)
{
	cpumask_t target;

	cpumask_clear(&target);
	cpumask_set_cpu(cpu, &target);
	sched_update_freq_max_load(&target);

	return 0;
}

/* Routes cpufreq policy notifications to cpufreq_notifier_policy(). */
static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_notifier_policy
};
@@ -2609,6 +2691,15 @@ static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_notifier_trans
};

/* Routes per-CPU power-stats-ready events to pwr_stats_ready_notifier(). */
static struct notifier_block notifier_pwr_stats_ready = {
	.notifier_call = pwr_stats_ready_notifier
};

/*
 * Weak stub; platforms that publish CPU power stats provide a strong
 * definition.  The -EINVAL result is ignored by register_sched_callback().
 */
int __weak register_cpu_pwr_stats_ready_notifier(struct notifier_block *nb)
{
	return -EINVAL;
}

static int register_sched_callback(void)
{
	int ret;
@@ -2623,6 +2714,8 @@ static int register_sched_callback(void)
		ret = cpufreq_register_notifier(&notifier_trans_block,
						CPUFREQ_TRANSITION_NOTIFIER);

	register_cpu_pwr_stats_ready_notifier(&notifier_pwr_stats_ready);

	return 0;
}

+9 −2
Original line number Diff line number Diff line
@@ -2830,6 +2830,7 @@ static unsigned int power_cost_at_freq(int cpu, unsigned int freq)
	int i = 0;
	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
	struct cpu_pstate_pwr *costs;
	struct freq_max_load *max_load;

	if (!per_cpu_info || !per_cpu_info[cpu].ptable ||
	    !sysctl_sched_enable_power_aware)
@@ -2842,12 +2843,18 @@ static unsigned int power_cost_at_freq(int cpu, unsigned int freq)

	costs = per_cpu_info[cpu].ptable;

	rcu_read_lock();
	max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
	while (costs[i].freq != 0) {
		if (costs[i].freq >= freq ||
		    costs[i+1].freq == 0)
		if (costs[i+1].freq == 0 ||
		    (costs[i].freq >= freq &&
		     (!max_load || max_load->freqs[i] >= freq))) {
			rcu_read_unlock();
			return costs[i].power;
		}
		i++;
	}
	rcu_read_unlock();
	BUG();
}

+7 −0
Original line number Diff line number Diff line
@@ -25,6 +25,13 @@ extern __read_mostly int scheduler_running;
extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

struct freq_max_load {
	struct rcu_head rcu;
	u32 freqs[0];
};

extern DEFINE_PER_CPU(struct freq_max_load *, freq_max_load);

extern long calc_load_fold_active(struct rq *this_rq);
extern void update_cpu_load_active(struct rq *this_rq);