Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bff77808 authored by Pavankumar Kondeti's avatar Pavankumar Kondeti
Browse files

sched: walt: Improve the scheduler



This change moves the irq-work queueing helper out of the shared scheduler header (sched_irq_work_queue) and into walt.c as a local walt_irq_work_queue(), updating the WALT call sites; the helper queues irq work on another online CPU when the local CPU is offline.

Change-Id: I4623a59eca688209110c347d388df2a2839a6287
Signed-off-by: default avatarPavankumar Kondeti <pkondeti@codeaurora.org>
parent a7ee6826
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -265,7 +265,7 @@ static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,

	if (use_pelt())
		sg_policy->work_in_progress = true;
	sched_irq_work_queue(&sg_policy->irq_work);
	irq_work_queue(&sg_policy->irq_work);
}

#define TARGET_LOAD 80
+0 −10
Original line number Diff line number Diff line
@@ -3087,13 +3087,3 @@ struct sched_avg_stats {
	int nr_scaled;
};
extern void sched_get_nr_running_avg(struct sched_avg_stats *stats);

#ifdef CONFIG_SMP
/*
 * Queue @work as irq work, preferring the local CPU.
 *
 * If the local CPU is already offline (e.g. during hotplug teardown),
 * queuing locally would be lost, so redirect the work to any CPU that
 * is still in cpu_online_mask instead.
 */
static inline void sched_irq_work_queue(struct irq_work *work)
{
	if (likely(cpu_online(raw_smp_processor_id())))
		irq_work_queue(work);
	else
		irq_work_queue_on(work, cpumask_any(cpu_online_mask));
}
#endif
+10 −2
Original line number Diff line number Diff line
@@ -202,6 +202,14 @@ early_param("sched_predl", set_sched_predl);
__read_mostly unsigned int walt_scale_demand_divisor;
#define scale_demand(d) ((d)/walt_scale_demand_divisor)

/*
 * Queue WALT irq work safely with respect to CPU hotplug.
 *
 * Normally the work is queued on the local CPU.  When the local CPU
 * has gone offline, fall back to any CPU still present in
 * cpu_online_mask so the work is not dropped.
 */
static inline void walt_irq_work_queue(struct irq_work *work)
{
	int cpu = raw_smp_processor_id();

	if (unlikely(!cpu_online(cpu))) {
		irq_work_queue_on(work, cpumask_any(cpu_online_mask));
		return;
	}

	irq_work_queue(work);
}

void inc_rq_walt_stats(struct rq *rq, struct task_struct *p)
{
	inc_nr_big_task(&rq->walt_stats, p);
@@ -971,7 +979,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
	if (!same_freq_domain(new_cpu, task_cpu(p))) {
		src_rq->notif_pending = true;
		dest_rq->notif_pending = true;
		sched_irq_work_queue(&walt_migration_irq_work);
		walt_irq_work_queue(&walt_migration_irq_work);
	}

	if (is_ed_enabled()) {
@@ -2061,7 +2069,7 @@ static inline void run_walt_irq_work(u64 old_window_start, struct rq *rq)
	result = atomic64_cmpxchg(&walt_irq_work_lastq_ws, old_window_start,
				   rq->window_start);
	if (result == old_window_start)
		sched_irq_work_queue(&walt_cpufreq_irq_work);
		walt_irq_work_queue(&walt_cpufreq_irq_work);
}

/* Reflect task activity on its demand and cpu's busy time statistics */