Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 672d3eb9 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: fix wrong load_scale_factor/capacity/nr_big/small_tasks"

parents 987b0d1b 9e37153f
Loading
+40 −8
Original line number Diff line number Diff line
@@ -1936,9 +1936,11 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
{
	struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
	int i;
	unsigned int min_max = min_max_freq;
	const struct cpumask *cpus = policy->related_cpus;
	int orig_min_max_freq = min_max_freq;
	unsigned int orig_min_max_freq = min_max_freq;
	unsigned int orig_max_possible_freq = max_possible_freq;
	/* Initialized to policy->max in case policy->related_cpus is empty! */
	unsigned int orig_max_freq = policy->max;

	if (val != CPUFREQ_NOTIFY)
		return 0;
@@ -1946,6 +1948,7 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
	for_each_cpu(i, policy->related_cpus) {
		cpumask_copy(&cpu_rq(i)->freq_domain_cpumask,
			     policy->related_cpus);
		orig_max_freq = cpu_rq(i)->max_freq;
		cpu_rq(i)->min_freq = policy->min;
		cpu_rq(i)->max_freq = policy->max;
		cpu_rq(i)->max_possible_freq = policy->cpuinfo.max_freq;
@@ -1953,20 +1956,49 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,

	max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
	if (min_max_freq == 1)
		min_max = UINT_MAX;
	min_max_freq = min(min_max, policy->cpuinfo.max_freq);
		min_max_freq = UINT_MAX;
	min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
	BUG_ON(!min_max_freq);
	BUG_ON(!policy->max);

	if (min_max_freq != orig_min_max_freq)
		cpus = cpu_online_mask;
	if (orig_max_possible_freq == max_possible_freq &&
		orig_min_max_freq == min_max_freq &&
		orig_max_freq == policy->max)
			return 0;

	/*
	 * A changed min_max_freq or max_possible_freq (possible during bootup)
	 * needs to trigger re-computation of load_scale_factor and capacity for
	 * all possible cpus (even those offline). It also needs to trigger
	 * re-computation of nr_big/small_task count on all online cpus.
	 *
	 * A changed rq->max_freq otoh needs to trigger re-computation of
	 * load_scale_factor and capacity for just the cluster of cpus involved.
	 * Since small task definition depends on max_load_scale_factor, a
	 * changed load_scale_factor of one cluster could influence small_task
	 * classification of tasks in another cluster. Hence a changed
	 * rq->max_freq will need to trigger re-computation of nr_big/small_task
	 * count on all online cpus.
	 *
	 * While it should be sufficient for nr_big/small_tasks to be
	 * re-computed for only online cpus, we have inadequate context
	 * information here (in policy notifier) with regard to hotplug-safety
	 * context in which notification is issued. As a result, we can't use
	 * get_online_cpus() here, as it can lead to deadlock. Until cpufreq is
	 * fixed up to issue notification always in hotplug-safe context,
	 * re-compute nr_big/small_task for all possible cpus.
	 */

	if (orig_min_max_freq != min_max_freq ||
		orig_max_possible_freq != max_possible_freq)
			cpus = cpu_possible_mask;

	/*
	 * Changed load_scale_factor can trigger reclassification of tasks as
	 * big or small. Make this change "atomic" so that tasks are accounted
	 * properly due to changed load_scale_factor
	 */
	pre_big_small_task_count_change();
	pre_big_small_task_count_change(cpu_possible_mask);
	for_each_cpu(i, cpus) {
		struct rq *rq = cpu_rq(i);

@@ -1977,7 +2009,7 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
	}

	update_min_max_capacity();
	post_big_small_task_count_change();
	post_big_small_task_count_change(cpu_possible_mask);

	return 0;
}
+13 −9
Original line number Diff line number Diff line
@@ -1858,13 +1858,13 @@ void fixup_nr_big_small_task(int cpu)
}

/* Disable interrupts and grab runqueue lock of all cpus listed in @cpus */
void pre_big_small_task_count_change(void)
void pre_big_small_task_count_change(const struct cpumask *cpus)
{
	int i;

	local_irq_disable();

	for_each_online_cpu(i)
	for_each_cpu(i, cpus)
		raw_spin_lock(&cpu_rq(i)->lock);
}

@@ -1872,15 +1872,15 @@ void pre_big_small_task_count_change(void)
 * Reinitialize 'nr_big_tasks' and 'nr_small_tasks' counters on all affected
 * cpus
 */
void post_big_small_task_count_change(void)
void post_big_small_task_count_change(const struct cpumask *cpus)
{
	int i;

	/* Assumes local_irq_disable() keeps online cpumap stable */
	for_each_online_cpu(i)
	for_each_cpu(i, cpus)
		fixup_nr_big_small_task(i);

	for_each_online_cpu(i)
	for_each_cpu(i, cpus)
		raw_spin_unlock(&cpu_rq(i)->lock);

	local_irq_enable();
@@ -1972,15 +1972,19 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
	 */
	if ((*data != old_val) &&
		(data == &sysctl_sched_upmigrate_pct ||
		data == &sysctl_sched_small_task_pct))
			pre_big_small_task_count_change();
		data == &sysctl_sched_small_task_pct)) {
			get_online_cpus();
			pre_big_small_task_count_change(cpu_online_mask);
	}

	set_hmp_defaults();

	if ((*data != old_val) &&
		(data == &sysctl_sched_upmigrate_pct ||
		data == &sysctl_sched_small_task_pct))
			post_big_small_task_count_change();
		data == &sysctl_sched_small_task_pct)) {
			post_big_small_task_count_change(cpu_online_mask);
			put_online_cpus();
	}

	return 0;
}
+2 −2
Original line number Diff line number Diff line
@@ -804,8 +804,8 @@ extern unsigned int sched_enable_power_aware;

int mostly_idle_cpu(int cpu);
extern void check_for_migration(struct rq *rq, struct task_struct *p);
extern void pre_big_small_task_count_change(void);
extern void post_big_small_task_count_change(void);
extern void pre_big_small_task_count_change(const struct cpumask *cpus);
extern void post_big_small_task_count_change(const struct cpumask *cpus);
extern void inc_nr_big_small_task(struct rq *rq, struct task_struct *p);
extern void dec_nr_big_small_task(struct rq *rq, struct task_struct *p);
extern void set_hmp_defaults(void);