
Commit 027cc2e4 authored by Viresh Kumar, committed by Rafael J. Wysocki

cpufreq: stats: remove hotplug notifiers

Whether CPUs are hot-unplugged or a suspend/resume cycle occurs, the cpufreq
core sends notifications to cpufreq-stats, and the stats structures and sysfs
entries are handled correctly.

So we don't actually need hotcpu notifiers in cpufreq-stats anymore. We were
only handling CPU hot-unplug events here, and those are already taken care of
by the POLICY notifiers.
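The reasoning above hinges on the cpufreq POLICY notifier chain: when a policy is created or torn down (including on CPU hot-unplug), the core emits CPUFREQ_CREATE_POLICY / CPUFREQ_REMOVE_POLICY to every registered policy notifier, which is exactly what cpufreq_stat_notifier_policy() handles. As a rough, hypothetical sketch only (not the cpufreq-stats code itself; all example_* names are invented), a module listening on the same chain could look like this:

/*
 * Hypothetical sketch: subscribe to the cpufreq POLICY notifier chain
 * and react to policy creation/removal, the same events cpufreq-stats
 * relies on instead of a separate hotcpu notifier.
 */
#include <linux/cpufreq.h>
#include <linux/module.h>

static int example_policy_notifier(struct notifier_block *nb,
				   unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;

	if (val == CPUFREQ_CREATE_POLICY)
		pr_info("example: policy created for CPU%u\n", policy->cpu);
	else if (val == CPUFREQ_REMOVE_POLICY)
		pr_info("example: policy removed for CPU%u\n", policy->cpu);

	return NOTIFY_OK;
}

static struct notifier_block example_policy_nb = {
	.notifier_call = example_policy_notifier,
};

static int __init example_init(void)
{
	return cpufreq_register_notifier(&example_policy_nb,
					 CPUFREQ_POLICY_NOTIFIER);
}

static void __exit example_exit(void)
{
	cpufreq_unregister_notifier(&example_policy_nb,
				    CPUFREQ_POLICY_NOTIFIER);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Registration mirrors what cpufreq_stats_init() does below with notifier_policy_block, only without the register_hotcpu_notifier() call that this patch removes.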

Acked-by: Nicolas Pitre <nico@linaro.org>
Tested-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent fcd7af91
+2 −36
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -294,12 +294,9 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
 	if (val == CPUFREQ_CREATE_POLICY)
 		ret = cpufreq_stats_create_table(policy, table);
 	else if (val == CPUFREQ_REMOVE_POLICY) {
-		/* This might already be freed by cpu hotplug notifier */
-		if (per_cpu(cpufreq_stats_table, cpu)) {
-			cpufreq_stats_free_sysfs(cpu);
-			cpufreq_stats_free_table(cpu);
-		}
+		cpufreq_stats_free_sysfs(cpu);
+		cpufreq_stats_free_table(cpu);
 	}
 
 	return ret;
 }
@@ -340,33 +337,6 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
 	return 0;
 }
 
-static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
-{
-	unsigned int cpu = (unsigned long)hcpu;
-
-	/* Don't free/allocate stats during suspend/resume */
-	if (action & CPU_TASKS_FROZEN)
-		return 0;
-
-	switch (action) {
-	case CPU_DOWN_PREPARE:
-		cpufreq_stats_free_sysfs(cpu);
-		break;
-	case CPU_DEAD:
-		cpufreq_stats_free_table(cpu);
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-/* priority=1 so this will get called before cpufreq_remove_dev */
-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
-	.notifier_call = cpufreq_stat_cpu_callback,
-	.priority = 1,
-};
-
 static struct notifier_block notifier_policy_block = {
 	.notifier_call = cpufreq_stat_notifier_policy
 };
@@ -386,14 +356,11 @@ static int __init cpufreq_stats_init(void)
 	if (ret)
 		return ret;
 
-	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
-
 	ret = cpufreq_register_notifier(&notifier_trans_block,
 				CPUFREQ_TRANSITION_NOTIFIER);
 	if (ret) {
 		cpufreq_unregister_notifier(&notifier_policy_block,
 				CPUFREQ_POLICY_NOTIFIER);
-		unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
 		for_each_online_cpu(cpu)
 			cpufreq_stats_free_table(cpu);
 		return ret;
@@ -409,7 +376,6 @@ static void __exit cpufreq_stats_exit(void)
 			CPUFREQ_POLICY_NOTIFIER);
 	cpufreq_unregister_notifier(&notifier_trans_block,
 			CPUFREQ_TRANSITION_NOTIFIER);
-	unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
 	for_each_online_cpu(cpu) {
 		cpufreq_stats_free_table(cpu);
 		cpufreq_stats_free_sysfs(cpu);