Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d3370500 authored by Pavankumar Kondeti
Browse files

sched: walt: refactor the walt stats update code



Each scheduling class has its own wrappers to update/fixup
the walt stats. Move the update/fixup logic to a common
place (walt.c and walt.h).

Change-Id: I405874ad7bb537d87740ad9f0d1f6e72fc26fd8b
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 84f72d7e
Loading
Loading
Loading
Loading
+3 −38
Original line number Diff line number Diff line
@@ -19,41 +19,6 @@

#include <linux/slab.h>

#ifdef CONFIG_SCHED_WALT

/*
 * DL-class enqueue hook: fold task @p's demand into this runqueue's
 * cumulative WALT runnable statistics (rq->walt_stats).
 */
static void
inc_walt_sched_stats_dl(struct rq *rq, struct task_struct *p)
{
	inc_cumulative_runnable_avg(&rq->walt_stats, p);
}

/*
 * DL-class dequeue hook: remove task @p's demand from this runqueue's
 * cumulative WALT runnable statistics (rq->walt_stats).
 */
static void
dec_walt_sched_stats_dl(struct rq *rq, struct task_struct *p)
{
	dec_cumulative_runnable_avg(&rq->walt_stats, p);
}

/*
 * DL-class .fixup_walt_sched_stats callback: adjust rq->walt_stats when
 * an already-enqueued task's load changes, by applying signed deltas
 * rather than a full dec/inc cycle.
 *
 * @new_task_load:   task's updated WALT demand
 * @new_pred_demand: task's updated predicted demand
 */
static void
fixup_walt_sched_stats_dl(struct rq *rq, struct task_struct *p,
			 u32 new_task_load, u32 new_pred_demand)
{
	s64 task_load_delta = (s64)new_task_load - task_load(p);
	/*
	 * PRED_DEMAND_DELTA presumably expands to new_pred_demand minus
	 * the task's current predicted demand — confirm in walt.h.
	 */
	s64 pred_demand_delta = PRED_DEMAND_DELTA;

	fixup_cumulative_runnable_avg(&rq->walt_stats, p, task_load_delta,
				      pred_demand_delta);
}

#else	/* CONFIG_SCHED_WALT */

/* !CONFIG_SCHED_WALT: no WALT accounting, hooks compile to no-ops. */
static inline void
inc_walt_sched_stats_dl(struct rq *rq, struct task_struct *p) { }

static inline void
dec_walt_sched_stats_dl(struct rq *rq, struct task_struct *p) { }

#endif	/* CONFIG_SCHED_WALT */

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
@@ -865,7 +830,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;
	add_nr_running(rq_of_dl_rq(dl_rq), 1);
	inc_walt_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
	walt_inc_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
@@ -880,7 +845,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
	dec_walt_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
	walt_dec_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
@@ -1845,7 +1810,7 @@ const struct sched_class dl_sched_class = {

	.update_curr		= update_curr_dl,
#ifdef CONFIG_SCHED_WALT
	.fixup_walt_sched_stats	= fixup_walt_sched_stats_dl,
	.fixup_walt_sched_stats	= fixup_walt_sched_stats_common,
#endif
};

+4 −17
Original line number Diff line number Diff line
@@ -38,8 +38,6 @@
#include <trace/events/sched.h>

#ifdef CONFIG_SCHED_WALT
static void fixup_walt_sched_stats_fair(struct rq *rq, struct task_struct *p,
				       u32 new_task_load, u32 new_pred_demand);
static inline bool task_fits_max(struct task_struct *p, int cpu);
#endif

@@ -4658,7 +4656,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
#ifdef CONFIG_SCHED_WALT
		p->misfit = !task_fits_max(p, rq->cpu);
#endif
		inc_rq_walt_stats(rq, p, 1);
		inc_rq_walt_stats(rq, p);
	}

#ifdef CONFIG_SMP
@@ -4743,7 +4741,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)

	if (!se) {
		sub_nr_running(rq, 1);
		dec_rq_walt_stats(rq, p, 1);
		dec_rq_walt_stats(rq, p);
	}

#ifdef CONFIG_SMP
@@ -10393,7 +10391,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
	rq->misfit_task = misfit;

	if (old_misfit != misfit) {
		adjust_nr_big_tasks(&rq->walt_stats, 1, misfit);
		walt_adjust_nr_big_tasks(rq, 1, misfit);
		curr->misfit = misfit;
	}
#endif
@@ -10858,7 +10856,7 @@ const struct sched_class fair_sched_class = {
	.task_change_group	= task_change_group_fair,
#endif
#ifdef CONFIG_SCHED_WALT
	.fixup_walt_sched_stats	= fixup_walt_sched_stats_fair,
	.fixup_walt_sched_stats	= fixup_walt_sched_stats_common,
#endif
};

@@ -10910,17 +10908,6 @@ __init void init_sched_fair_class(void)
/* WALT sched implementation begins here */
#ifdef CONFIG_SCHED_WALT

/*
 * CFS-class .fixup_walt_sched_stats callback: adjust rq->walt_stats in
 * place when a queued task's load changes, using signed deltas against
 * the task's current values.
 *
 * @new_task_load:   task's updated WALT demand
 * @new_pred_demand: task's updated predicted demand
 */
static void
fixup_walt_sched_stats_fair(struct rq *rq, struct task_struct *p,
			   u32 new_task_load, u32 new_pred_demand)
{
	s64 task_load_delta = (s64)new_task_load - task_load(p);
	/*
	 * PRED_DEMAND_DELTA presumably computes new_pred_demand minus the
	 * task's current predicted demand — confirm in walt.h.
	 */
	s64 pred_demand_delta = PRED_DEMAND_DELTA;

	fixup_cumulative_runnable_avg(&rq->walt_stats, p, task_load_delta,
				      pred_demand_delta);
}

static inline int
kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
{
+0 −11
Original line number Diff line number Diff line
@@ -78,14 +78,6 @@ static void update_curr_idle(struct rq *rq)
{
}

#ifdef CONFIG_SCHED_WALT
/*
 * Idle-class .fixup_walt_sched_stats callback: the idle task carries no
 * WALT demand, so there is nothing to adjust — intentional no-op that
 * only satisfies the sched_class hook.
 */
static void
fixup_walt_sched_stats_idle(struct rq *rq, struct task_struct *p,
			   u32 new_task_load, u32 new_pred_demand)
{
}
#endif

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
@@ -114,7 +106,4 @@ const struct sched_class idle_sched_class = {
	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
#ifdef CONFIG_SCHED_WALT
	.fixup_walt_sched_stats	= fixup_walt_sched_stats_idle,
#endif
};
+3 −42
Original line number Diff line number Diff line
@@ -11,45 +11,6 @@
#include <linux/irq_work.h>
#include <trace/events/sched.h>

#ifdef CONFIG_SCHED_WALT

/*
 * RT-class enqueue hook: fold task @p's demand into this runqueue's
 * cumulative WALT runnable statistics (rq->walt_stats).
 */
static void
inc_walt_sched_stats_rt(struct rq *rq, struct task_struct *p)
{
	inc_cumulative_runnable_avg(&rq->walt_stats, p);
}

/*
 * RT-class dequeue hook: remove task @p's demand from this runqueue's
 * cumulative WALT runnable statistics (rq->walt_stats).
 */
static void
dec_walt_sched_stats_rt(struct rq *rq, struct task_struct *p)
{
	dec_cumulative_runnable_avg(&rq->walt_stats, p);
}

/*
 * RT-class .fixup_walt_sched_stats callback: adjust rq->walt_stats when
 * a queued task's load changes, applying signed deltas against the
 * task's current values.
 *
 * @new_task_load:   task's updated WALT demand
 * @new_pred_demand: task's updated predicted demand
 */
static void
fixup_walt_sched_stats_rt(struct rq *rq, struct task_struct *p,
			 u32 new_task_load, u32 new_pred_demand)
{
	s64 task_load_delta = (s64)new_task_load - task_load(p);
	/*
	 * PRED_DEMAND_DELTA presumably expands to new_pred_demand minus
	 * the task's current predicted demand — confirm in walt.h.
	 */
	s64 pred_demand_delta = PRED_DEMAND_DELTA;

	fixup_cumulative_runnable_avg(&rq->walt_stats, p, task_load_delta,
				      pred_demand_delta);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

#endif /* CONFIG_SMP */
#else  /* CONFIG_SCHED_WALT */

/* !CONFIG_SCHED_WALT: no WALT accounting, hooks compile to no-ops. */
static inline void
inc_walt_sched_stats_rt(struct rq *rq, struct task_struct *p) { }

static inline void
dec_walt_sched_stats_rt(struct rq *rq, struct task_struct *p) { }

#endif	/* CONFIG_SCHED_WALT */

#include "walt.h"

int sched_rr_timeslice = RR_TIMESLICE;
@@ -1421,7 +1382,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, flags);
	inc_walt_sched_stats_rt(rq, p);
	walt_inc_cumulative_runnable_avg(rq, p);

	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
		enqueue_pushable_task(rq, p);
@@ -1433,7 +1394,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se, flags);
	dec_walt_sched_stats_rt(rq, p);
	walt_dec_cumulative_runnable_avg(rq, p);

	dequeue_pushable_task(rq, p);
}
@@ -2623,7 +2584,7 @@ const struct sched_class rt_sched_class = {

	.update_curr		= update_curr_rt,
#ifdef CONFIG_SCHED_WALT
	.fixup_walt_sched_stats	= fixup_walt_sched_stats_rt,
	.fixup_walt_sched_stats	= fixup_walt_sched_stats_common,
#endif
};

+0 −10
Original line number Diff line number Diff line
@@ -2565,10 +2565,6 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
extern int sched_boost(void);
extern int preferred_cluster(struct sched_cluster *cluster,
						struct task_struct *p);
extern void inc_rq_walt_stats(struct rq *rq,
				struct task_struct *p, int change_cra);
extern void dec_rq_walt_stats(struct rq *rq,
				struct task_struct *p, int change_cra);
extern struct sched_cluster *rq_cluster(struct rq *rq);
extern void reset_task_stats(struct task_struct *p);
extern void clear_top_tasks_bitmap(unsigned long *bitmap);
@@ -2686,12 +2682,6 @@ static inline int sched_boost(void)

static inline bool is_max_capacity_cpu(int cpu) { return true; }

static inline void
inc_rq_walt_stats(struct rq *rq, struct task_struct *p, int change_cra) { }

static inline void
dec_rq_walt_stats(struct rq *rq, struct task_struct *p, int change_cra) { }

static inline int
preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
{
Loading