Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5b10bb9f authored by Junjie Wu; committed by Matt Wagantall
Browse files

cpufreq: interactive: Avoid down_read_trylock if down_write() is held



down_read_trylock() is not always non-blocking if the same thread has
previously called down_write().

CPU1					CPU2
					down_read()
down_write()
  __down_write_nested()
    schedule()
      __down_read_trylock()
					up_read()
					  acquires sem->wait_lock
					    __rwsem_wake_one_writer()
	tries to lock sem->wait_lock

Now CPU2 is waiting for CPU1's schedule() to complete, while holding
sem->wait_lock. CPU1 needs sem->wait_lock to continue.

This problem only happens after cpufreq_interactive introduced the load
change notification, which can be called from within schedule().

Add a separate flag to ignore the notification if the current thread is in
the middle of down_write(). This avoids attempting to hold sem->wait_lock.
The additional flag doesn't have any side effects because
down_read_trylock() would have failed anyway.

Change-Id: Iff97cac36c170cf6d03f36de695141289c3d6930
Signed-off-by: Junjie Wu <junjiew@codeaurora.org>
parent 5920d95f
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -54,6 +54,7 @@ struct cpufreq_interactive_cpuinfo {
	u64 local_hvtime; /* per-cpu hispeed_validate_time */
	u64 max_freq_idle_start_time;
	struct rw_semaphore enable_sem;
	bool reject_notification;
	int governor_enabled;
	struct cpufreq_interactive_tunables *cached_tunables;
	int first_cpu;
@@ -758,6 +759,9 @@ static int load_change_callback(struct notifier_block *nb, unsigned long val,
	if (speedchange_task == current)
		return 0;

	if (pcpu->reject_notification)
		return 0;

	if (!down_read_trylock(&pcpu->enable_sem))
		return 0;
	if (!pcpu->governor_enabled) {
@@ -1657,6 +1661,7 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
				pcpu->floor_validate_time;
			pcpu->local_hvtime = pcpu->floor_validate_time;
			pcpu->max_freq = policy->max;
			pcpu->reject_notification = true;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
@@ -1664,6 +1669,7 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
			cpufreq_interactive_timer_start(tunables, j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
			pcpu->reject_notification = false;
		}

		mutex_unlock(&gov_lock);
@@ -1673,11 +1679,13 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->reject_notification = true;
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
			pcpu->reject_notification = false;
		}

		mutex_unlock(&gov_lock);
@@ -1716,11 +1724,13 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
			 */

			if (policy->max > pcpu->max_freq) {
				pcpu->reject_notification = true;
				down_write(&pcpu->enable_sem);
				del_timer_sync(&pcpu->cpu_timer);
				del_timer_sync(&pcpu->cpu_slack_timer);
				cpufreq_interactive_timer_resched(j);
				up_write(&pcpu->enable_sem);
				pcpu->reject_notification = false;
			}

			pcpu->max_freq = policy->max;