Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 92854f1f authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "cpufreq: schedutil: Queue sugov irq work on policy online cpu"

parents bf02ca46 a1e22684
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -267,7 +267,7 @@ static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,

	if (use_pelt())
		sg_policy->work_in_progress = true;
-	irq_work_queue(&sg_policy->irq_work);
+	sched_irq_work_queue(&sg_policy->irq_work);
}

#define TARGET_LOAD 80
+10 −0
Original line number Diff line number Diff line
@@ -3096,3 +3096,13 @@ struct sched_avg_stats {
	int nr_scaled;
};
extern void sched_get_nr_running_avg(struct sched_avg_stats *stats);

#ifdef CONFIG_SMP
/*
 * sched_irq_work_queue - queue scheduler irq_work on a CPU that is online.
 *
 * If the current CPU is online (the common case), queue locally via
 * irq_work_queue(); otherwise redirect to any online CPU with
 * irq_work_queue_on() so the work is not stranded on a CPU that is going
 * (or has gone) offline.
 *
 * NOTE(review): raw_smp_processor_id() is used, so this may run in a
 * preemptible context; the online check is therefore best-effort — the CPU
 * could start going offline between the check and the queue. Presumably the
 * hotplug path flushes pending irq_work, making that race benign — confirm
 * against the CPU hotplug teardown ordering.
 *
 * @work: the irq_work to queue; caller retains ownership.
 */
static inline void sched_irq_work_queue(struct irq_work *work)
{
	if (likely(cpu_online(raw_smp_processor_id())))
		irq_work_queue(work);
	else
		irq_work_queue_on(work, cpumask_any(cpu_online_mask));
}
#else
/*
 * On !SMP there is exactly one CPU and it is online by definition; fall
 * straight through to irq_work_queue(). Without this branch, callers
 * (e.g. sugov_deferred_update) fail to compile on UP kernels because the
 * helper would be undeclared.
 */
static inline void sched_irq_work_queue(struct irq_work *work)
{
	irq_work_queue(work);
}
#endif
+2 −2
Original line number Diff line number Diff line
@@ -982,7 +982,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
	if (!same_freq_domain(new_cpu, task_cpu(p))) {
		src_rq->notif_pending = true;
		dest_rq->notif_pending = true;
-		irq_work_queue(&walt_migration_irq_work);
+		sched_irq_work_queue(&walt_migration_irq_work);
	}

	if (is_ed_enabled()) {
@@ -2072,7 +2072,7 @@ static inline void run_walt_irq_work(u64 old_window_start, struct rq *rq)
	result = atomic64_cmpxchg(&walt_irq_work_lastq_ws, old_window_start,
				   rq->window_start);
	if (result == old_window_start)
-		irq_work_queue(&walt_cpufreq_irq_work);
+		sched_irq_work_queue(&walt_cpufreq_irq_work);
}

/* Reflect task activity on its demand and cpu's busy time statistics */