Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d58bb95a authored by Badhri Jagan Sridharan, committed by Amit Pundir
Browse files

cpufreq: interactive: restructure CPUFREQ_GOV_LIMITS



The cpufreq_interactive_timer gets cancelled and rescheduled
whenever the cpufreq_policy is changed. When the cpufreq policy is
changed at a rate faster than the sampling_rate of the interactive
governor, the governor fails to change the target frequency
for a long duration. This patch removes the need to cancel the
timers when policy->min is changed.

Signed-off-by: Badhri Jagan Sridharan <Badhri@google.com>
Change-Id: Ibd98d151e1c73b8bd969484583ff98ee9f1135ef
parent e2c6c884
Loading
Loading
Loading
Loading
+35 −13
Original line number Original line Diff line number Diff line
@@ -44,8 +44,10 @@ struct cpufreq_interactive_cpuinfo {
	u64 cputime_speedadj_timestamp;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /*protects target freq */
	unsigned int target_freq;
	unsigned int target_freq;
	unsigned int floor_freq;
	unsigned int floor_freq;
	unsigned int max_freq;
	u64 floor_validate_time;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	struct rw_semaphore enable_sem;
@@ -358,6 +360,7 @@ static void cpufreq_interactive_timer(unsigned long data)
	if (WARN_ON_ONCE(!delta_time))
	if (WARN_ON_ONCE(!delta_time))
		goto rearm;
		goto rearm;


	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	cpu_load = loadadjfreq / pcpu->target_freq;
@@ -383,6 +386,7 @@ static void cpufreq_interactive_timer(unsigned long data)
		trace_cpufreq_interactive_notyet(
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
		goto rearm;
	}
	}


@@ -390,8 +394,10 @@ static void cpufreq_interactive_timer(unsigned long data)


	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   new_freq, CPUFREQ_RELATION_L,
					   &index))
					   &index)) {
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
		goto rearm;
	}


	new_freq = pcpu->freq_table[index].frequency;
	new_freq = pcpu->freq_table[index].frequency;


@@ -405,6 +411,7 @@ static void cpufreq_interactive_timer(unsigned long data)
			trace_cpufreq_interactive_notyet(
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
				pcpu->policy->cur, new_freq);
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			goto rearm;
			goto rearm;
		}
		}
	}
	}
@@ -426,6 +433,7 @@ static void cpufreq_interactive_timer(unsigned long data)
		trace_cpufreq_interactive_already(
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm_if_notmax;
		goto rearm_if_notmax;
	}
	}


@@ -433,6 +441,7 @@ static void cpufreq_interactive_timer(unsigned long data)
					 pcpu->policy->cur, new_freq);
					 pcpu->policy->cur, new_freq);


	pcpu->target_freq = new_freq;
	pcpu->target_freq = new_freq;
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
@@ -576,16 +585,17 @@ static void cpufreq_interactive_boost(void)
{
{
	int i;
	int i;
	int anyboost = 0;
	int anyboost = 0;
	unsigned long flags;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_interactive_tunables *tunables;
	struct cpufreq_interactive_tunables *tunables;


	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);


	for_each_online_cpu(i) {
	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		pcpu = &per_cpu(cpuinfo, i);
		tunables = pcpu->policy->governor_data;
		tunables = pcpu->policy->governor_data;


		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->hispeed_freq) {
		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			cpumask_set_cpu(i, &speedchange_cpumask);
@@ -601,9 +611,10 @@ static void cpufreq_interactive_boost(void)


		pcpu->floor_freq = tunables->hispeed_freq;
		pcpu->floor_freq = tunables->hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
	}
	}


	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);


	if (anyboost)
	if (anyboost)
		wake_up_process(speedchange_task);
		wake_up_process(speedchange_task);
@@ -1114,6 +1125,7 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;


	if (have_governor_per_policy())
	if (have_governor_per_policy())
		tunables = policy->governor_data;
		tunables = policy->governor_data;
@@ -1215,6 +1227,7 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
				ktime_to_us(ktime_get());
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
				pcpu->floor_validate_time;
			pcpu->max_freq = policy->max;
			down_write(&pcpu->enable_sem);
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
@@ -1250,30 +1263,38 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		for_each_cpu(j, policy->cpus) {
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu = &per_cpu(cpuinfo, j);


			/* hold write semaphore to avoid race */
			down_read(&pcpu->enable_sem);
			down_write(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
			if (pcpu->governor_enabled == 0) {
				up_write(&pcpu->enable_sem);
				up_read(&pcpu->enable_sem);
				continue;
				continue;
			}
			}


			/* update target_freq firstly */
			spin_lock_irqsave(&pcpu->target_freq_lock, flags);
			if (policy->max < pcpu->target_freq)
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;
				pcpu->target_freq = policy->min;


			/* Reschedule timer.
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			up_read(&pcpu->enable_sem);

			/* Reschedule timer only if policy->max is raised.
			 * Delete the timers, else the timer callback may
			 * Delete the timers, else the timer callback may
			 * return without re-arm the timer when failed
			 * return without re-arm the timer when failed
			 * acquire the semaphore. This race may cause timer
			 * acquire the semaphore. This race may cause timer
			 * stopped unexpectedly.
			 * stopped unexpectedly.
			 */
			 */

			if (policy->max > pcpu->max_freq) {
				down_write(&pcpu->enable_sem);
				del_timer_sync(&pcpu->cpu_timer);
				del_timer_sync(&pcpu->cpu_timer);
				del_timer_sync(&pcpu->cpu_slack_timer);
				del_timer_sync(&pcpu->cpu_slack_timer);
				cpufreq_interactive_timer_start(tunables, j);
				cpufreq_interactive_timer_start(tunables, j);
				up_write(&pcpu->enable_sem);
				up_write(&pcpu->enable_sem);
			}
			}

			pcpu->max_freq = policy->max;
		}
		break;
		break;
	}
	}
	return 0;
	return 0;
@@ -1308,6 +1329,7 @@ static int __init cpufreq_interactive_init(void)
		init_timer(&pcpu->cpu_slack_timer);
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		spin_lock_init(&pcpu->load_lock);
		spin_lock_init(&pcpu->target_freq_lock);
		init_rwsem(&pcpu->enable_sem);
		init_rwsem(&pcpu->enable_sem);
	}
	}