Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1ebb383e authored by Rohit Gupta
Browse files

cpufreq: cpu-boost: Use one work to remove input boost for all CPUs



Currently each CPU queues up its own work for removing input boost.
This is not really required as boost removal for all the CPUs can
be done at the same time. So this patch uses a single work to
remove input boost for all the CPUs and updates the policy for
the online ones.

Change-Id: I37c809f2f155548b1d8c1b9aa7626c8852b3acc6
Signed-off-by: Rohit Gupta <rohgup@codeaurora.org>
parent f79e9342
Loading
Loading
Loading
Loading
+37 −24
Original line number Diff line number Diff line
@@ -30,7 +30,6 @@ struct cpu_sync {
	struct task_struct *thread;
	wait_queue_head_t sync_wq;
	struct delayed_work boost_rem;
	struct delayed_work input_boost_rem;
	int cpu;
	spinlock_t lock;
	bool pending;
@@ -63,6 +62,7 @@ module_param(migration_load_threshold, uint, 0644);
static bool load_based_syncs;
module_param(load_based_syncs, bool, 0644);

static struct delayed_work input_boost_rem;
static u64 last_input_time;
#define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC)

@@ -194,15 +194,33 @@ static void do_boost_rem(struct work_struct *work)
	cpufreq_update_policy(s->cpu);
}

/*
 * update_policy_online - force a cpufreq policy re-evaluation on every
 * CPU that is currently online, so the adjust notifier observes any
 * freshly written boost floor values.
 */
static void update_policy_online(void)
{
	unsigned int cpu;

	/* Pin the hotplug state so the online mask cannot change mid-walk. */
	get_online_cpus();
	for_each_online_cpu(cpu) {
		pr_debug("Updating policy for CPU%d\n", cpu);
		cpufreq_update_policy(cpu);
	}
	put_online_cpus();
}

/*
 * do_input_boost_rem - delayed work that clears the input-boost frequency
 * floor after input_boost_ms expires, restoring normal cpufreq limits.
 *
 * NOTE(review): this span is a unified-diff hunk whose +/- markers were
 * lost in extraction. The removed per-CPU body (container_of(), s->...)
 * and the added all-CPU body (for_each_possible_cpu + update_policy_online)
 * appear interleaved below; only one of the two exists in any real
 * revision of the file. Do not treat the combined text as compilable.
 */
static void do_input_boost_rem(struct work_struct *work)
{
	/* Removed body: one delayed work per CPU, clears only that CPU. */
	struct cpu_sync *s = container_of(work, struct cpu_sync,
						input_boost_rem.work);
	/* Added body: one global work item clears every CPU at once. */
	unsigned int i;
	struct cpu_sync *i_sync_info;

	pr_debug("Removing input boost for CPU%d\n", s->cpu);
	s->input_boost_min = 0;
	/* Force policy re-evaluation to trigger adjust notifier. */
	cpufreq_update_policy(s->cpu);
	/* Reset the input_boost_min for all CPUs in the system */
	pr_debug("Resetting input boost min for all CPUs\n");
	for_each_possible_cpu(i) {
		i_sync_info = &per_cpu(sync_info, i);
		i_sync_info->input_boost_min = 0;
	}

	/* Update policies for all online CPUs */
	update_policy_online();
}

static int boost_mig_sync_thread(void *data)
@@ -319,28 +337,23 @@ static struct notifier_block boost_migration_nb = {

/*
 * do_input_boost - work item queued on input events; raises the
 * input-boost frequency floor for CPUs and schedules its later removal
 * after input_boost_ms milliseconds.
 *
 * NOTE(review): this span is a unified-diff hunk whose +/- markers were
 * lost in extraction; removed and added lines are interleaved (e.g. the
 * duplicate declarations of `i`, and the old for_each_online_cpu loop
 * header next to the new for_each_possible_cpu body). Only one variant
 * exists in any real revision; the combined text is not compilable.
 */
static void do_input_boost(struct work_struct *work)
{
	/* Removed declarations: old body checked cpufreq_get_policy(). */
	unsigned int i, ret;
	/* Added declarations: new body needs no per-CPU policy query. */
	unsigned int i;
	struct cpu_sync *i_sync_info;
	struct cpufreq_policy policy;

	/* Removed: old body held the hotplug lock and walked online CPUs. */
	get_online_cpus();
	for_each_online_cpu(i) {
	/* Added: cancel the single pending removal work before re-boosting. */
	cancel_delayed_work_sync(&input_boost_rem);

	/* Set the input_boost_min for all CPUs in the system */
	pr_debug("Setting input boost min for all CPUs\n");
	for_each_possible_cpu(i) {
		i_sync_info = &per_cpu(sync_info, i);
		/* Removed: old body skipped CPUs already at/above boost freq. */
		ret = cpufreq_get_policy(&policy, i);
		if (ret)
			continue;
		if (policy.cur >= i_sync_info->input_boost_freq)
			continue;

		cancel_delayed_work_sync(&i_sync_info->input_boost_rem);
		i_sync_info->input_boost_min = i_sync_info->input_boost_freq;
		cpufreq_update_policy(i);
		queue_delayed_work_on(i_sync_info->cpu, cpu_boost_wq,
			&i_sync_info->input_boost_rem,
			msecs_to_jiffies(input_boost_ms));
	}
	put_online_cpus();

	/* Update policies for all online CPUs */
	update_policy_online();

	/* Added: queue the single global removal work once for all CPUs. */
	queue_delayed_work(cpu_boost_wq, &input_boost_rem,
					msecs_to_jiffies(input_boost_ms));
}

static void cpuboost_input_event(struct input_handle *handle,
@@ -443,6 +456,7 @@ static int cpu_boost_init(void)
		return -EFAULT;

	INIT_WORK(&input_boost_work, do_input_boost);
	INIT_DELAYED_WORK(&input_boost_rem, do_input_boost_rem);

	for_each_possible_cpu(cpu) {
		s = &per_cpu(sync_info, cpu);
@@ -450,7 +464,6 @@ static int cpu_boost_init(void)
		init_waitqueue_head(&s->sync_wq);
		spin_lock_init(&s->lock);
		INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem);
		INIT_DELAYED_WORK(&s->input_boost_rem, do_input_boost_rem);
		s->thread = kthread_run(boost_mig_sync_thread,
				(void *) (long)cpu, "boost_sync/%d", cpu);
		set_cpus_allowed(s->thread, *cpumask_of(cpu));